# forked from ZonglinY/ECBRF_Case_Based_Reasoning_with_PLM
# comet-case-based-train-updating-retriever-twoEmbedder-DPR-faster-May-TST.py
import argparse, logging, os, sys, random, datetime, math, time, shutil, copy
os.environ["CUDA_VISIBLE_DEVICES"]="2,3"
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
sys.path.insert(0, "..")
from transformers import (CONFIG_NAME, WEIGHTS_NAME, AdamW,
get_linear_schedule_with_warmup)
# from transformers import (BertLMHeadModel, BertTokenizer, BertConfig)
from transformers import (GPT2LMHeadModel, GPT2Tokenizer, GPT2Config)
from transformers import (BartForConditionalGeneration, BartTokenizer, BartConfig)
# from transformers import (T5ForConditionalGeneration, T5Tokenizer, T5Config)
from transformers import (DPRQuestionEncoder, DPRContextEncoder, DPRQuestionEncoderTokenizer, DPRContextEncoderTokenizer)
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from tqdm import tqdm
import pickle
from torch.utils.tensorboard import SummaryWriter
from utils_TST import (load_conceptnet_pure, load_atomic_pure, load_shakespear, load_e2e,
save_model, add_special_tokens, tokenize_and_encode, set_seed,
preprocess_datasets_for_generator_and_retriever_and_retriever_doc_ProperEOS,
concat_cur_bundle_and_encoded_cases_EOSfixed_Bart,
concat_cur_bundle_and_encoded_cases_EOSfixed_Bart_randomly_mask_demonstrations,
get_path_cur_next_bundle, find_path_tensor_dataset,
wait_get_remove_cases_for_bundle_while_deleting_bad_cases_file, shift_tokens_right)
# from utils_TST import (concat_cur_bundle_and_encoded_cases_EOSfixed_Bart_COMETNoNeedRetriever, concat_cur_bundle_and_encoded_cases_EOSfixed_COMETNoNeedRetriever,
# concat_cur_bundle_and_encoded_cases_EOSfixed_randomly_mask_demonstrations, concat_cur_bundle_and_encoded_cases_EOSfixed)
from utils_baseline import load_sentiment_data
logging.basicConfig(format = "%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt = "%m/%d/%Y %H:%M:%S",
level = logging.INFO)
logger = logging.getLogger(__name__)
device1 = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
device2 = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
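# Inputs for the generator are placed on device1 and inputs for the two DPR retriever encoders on device2;
# the fast-train path with --if_try_one_gpu keeps everything on device1.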
# case_aug_cur_bundle: [case_aug_gene_input_id, case_aug_gene_attention_mask, case_aug_gene_lm_labels, \
# doc_retr_cases_input_ids, doc_retr_cases_attention_mask, doc_retr_cases_segment_ids, \
# input_retr_input_ids, input_retr_attention_mask, input_retr_segment_ids]
# case_aug_gene_input_id: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
# case_aug_gene_attention_mask: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
# case_aug_gene_lm_labels: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
# doc_retr_cases_input_ids: [batch_size, n_doc, cases_per_doc, input_len_retr] (not changed)
# input_retr_input_ids: [batch_size, input_len_retr]
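# batch_step: full training step that marginalizes the generator loss over the retrieved cases (RAG-style).
# It (1) embeds the retrieved cases and the current query with the two DPR encoders,
# (2) turns their dot-product similarities into per-doc probabilities, and
# (3) combines the per-doc log-probabilities with the generator's token log-likelihoods via logsumexp over docs, with label smoothing.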
def batch_step(args, model_generator, model_retriever, model_retriever_doc, batch, tokenizer_gene, epsilon):
## prepare input data
# batch_gene = tuple(t.to(device1) for t in batch[0:3])
# batch_retr_cases = tuple(t.to(device2) for t in batch[3:6])
batch_gene = batch[0:3]
batch_retr_cases = batch[3:6]
batch_retr_cur_input = tuple(t.to(device2) for t in batch[6:9])
# batch_retr_cur_input = batch[6:9]
# [batch_size, n_doc, cases_per_doc, input_len_retr]
## batch_retr_cases
doc_retr_cases_input_ids, doc_retr_cases_attention_mask, doc_retr_cases_segment_ids = batch_retr_cases
cases_per_doc, input_len_retr = doc_retr_cases_input_ids.size()[2], doc_retr_cases_input_ids.size()[3]
# view retr_cases to fit the requirement of bert's input
resh_doc_retr_cases_input_ids = doc_retr_cases_input_ids.view(-1, input_len_retr).to(device2)
resh_doc_retr_cases_attention_mask = doc_retr_cases_attention_mask.view(-1, input_len_retr).to(device2)
resh_doc_retr_cases_segment_ids = doc_retr_cases_segment_ids.view(-1, input_len_retr).to(device2)
# [batch_size, input_len_retr]
input_retr_input_ids, input_retr_attention_mask, input_retr_segment_ids = batch_retr_cur_input
# [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
## batch_gene
case_aug_gene_input_id, case_aug_gene_attention_mask, case_aug_gene_lm_labels = batch_gene
batch_size, n_doc, tgt_len_gene = case_aug_gene_input_id.size()[0], case_aug_gene_input_id.size()[1], case_aug_gene_input_id.size()[-1]
# view case_aug_gene to fit the requirement of gpt2's input
resh_case_aug_gene_input_id = case_aug_gene_input_id.view(-1, tgt_len_gene).to(device1)
resh_case_aug_gene_attention_mask = case_aug_gene_attention_mask.view(-1, tgt_len_gene).to(device1)
resh_case_aug_gene_lm_labels = case_aug_gene_lm_labels.view(-1, tgt_len_gene).to(device1)
### model_retriever
## batch_retr_cases
outputs_retr_cases = model_retriever_doc(resh_doc_retr_cases_input_ids, attention_mask=resh_doc_retr_cases_attention_mask, token_type_ids=resh_doc_retr_cases_segment_ids)
# pooled_embedding_retr_cases: [48, 768], verified
pooled_embedding_retr_cases = outputs_retr_cases[0]
# print('pooled_embedding_retr_cases.size(): ', pooled_embedding_retr_cases.size())
# pooled_embedding_retr_cases: [batch_size, n_doc, cases_per_doc, 768]
pooled_embedding_retr_cases = pooled_embedding_retr_cases.view(batch_size, n_doc, cases_per_doc, -1)
# print('pooled_embedding_retr_cases.size(): ', pooled_embedding_retr_cases.size())
# print('pooled_embedding_retr_cases.size(): ', pooled_embedding_retr_cases.size())
## batch_retr_cur_input
outputs_cur_batch = model_retriever(input_retr_input_ids, attention_mask=input_retr_attention_mask, token_type_ids=input_retr_segment_ids)
# pooled_embedding_cur_batch: [batch_size, 768]
pooled_embedding_cur_batch = outputs_cur_batch[0]
# pooled_embedding_cur_batch: [4, 768], verified
# print('pooled_embedding_cur_batch.size(): ', pooled_embedding_cur_batch.size())
# pooled_embedding_cur_batch: [batch_size, 1, 1, 768]
pooled_embedding_cur_batch = pooled_embedding_cur_batch.unsqueeze(1).unsqueeze(2)
# print('pooled_embedding_cur_batch.size()', pooled_embedding_cur_batch.size())
## Get similarity score: [batch_size, n_doc, cases_per_doc, 768]
simi_score = pooled_embedding_retr_cases * pooled_embedding_cur_batch
# simi_score: [batch_size, n_doc, cases_per_doc]
simi_score = torch.sum(simi_score, dim=3)
# Q:
torch.save(simi_score, os.path.join(args.output_dir, 'simi_score.pt'))
batch_size, n_doc, cases_per_doc = simi_score.size()[0], simi_score.size()[1], simi_score.size()[2]
## Q: one method to calculate simi_prob; possibly one cause of the NaN issue
simi_score = simi_score.view(batch_size, -1)
# SP: newly added, to fix the unbalanced distribution of simi_prob
simi_score = simi_score / 10
if args.rand_simi_score:
ori_seq_list = list(range(len(simi_score)))
new_seq_list = random.sample(ori_seq_list, len(simi_score))
new_seq_tensor = torch.tensor(new_seq_list).to(torch.long)
simi_score = simi_score[new_seq_tensor]
simi_prob = F.softmax(simi_score, dim=1)
simi_prob = simi_prob.view(batch_size, n_doc, cases_per_doc)
# simi_prob: [batch_size, n_doc]
simi_prob = torch.sum(simi_prob, dim=2)
# # Another method to calculate simi_prob
# simi_score = simi_score.sum(dim=2)
# simi_prob = F.softmax(simi_score, dim=1)
if not F.relu(simi_prob.sum() - simi_prob.size()[0]) < 0.01:
print('simi_prob:', simi_prob)
print('Warning: simi_prob does not sum to batch_size (expected F.relu(simi_prob.sum() - simi_prob.size()[0]) < 0.01)')
# doc_logprobs: [batch_size, n_doc, 1]
doc_logprobs = torch.log(simi_prob).unsqueeze(-1)
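# In effect, p(doc_j | query) is the softmax over all n_doc * cases_per_doc dot-product scores (scaled by 1/10 above),
# summed over the cases belonging to doc_j; doc_logprobs = log p(doc | query) is added to the generator
# log-likelihood before marginalizing over docs below.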
###############
# another simpler option; can be used for debugging
### model_generator
# if "bart" in args.generator_model_type:
# results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, labels=resh_case_aug_gene_lm_labels)
# elif "bert" in args.generator_model_type or "gpt" in args.generator_model_type:
# results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, labels=resh_case_aug_gene_lm_labels)
# nll_loss = results[0]
# loss = nll_loss
# seq_logits = results[1].to(device2)
# seq_logits = seq_logits.contiguous()
# seq_logprobs = F.log_softmax(seq_logits, dim=-1).view(seq_logits.shape[0] // args.n_doc, args.n_doc, -1, seq_logits.shape[-1])
###############
if "t5" in args.generator_model_type:
decoder_input_ids = model_generator._shift_right(resh_case_aug_gene_lm_labels)
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, decoder_input_ids=decoder_input_ids)
elif "bart" in args.generator_model_type:
decoder_input_ids = shift_tokens_right(resh_case_aug_gene_lm_labels, model_generator.config.pad_token_id, model_generator.config.decoder_start_token_id)
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, decoder_input_ids=decoder_input_ids)
else:
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask)
# logits/seq_logits: [batch_size * n_doc, tgt_length, #vocab]
logits = results[0]
seq_logits = logits.to(device2)
if args.generator_model_type == "gpt2-lmhead" or "bert" in args.generator_model_type:
# !!! use these three lines to fix the loss calculating bug of GPT2LMHeadModel
seq_logits = seq_logits[..., :-1, :].contiguous()
shift_labels = resh_case_aug_gene_lm_labels[..., 1:].contiguous()
elif "bart" in args.generator_model_type or "t5" in args.generator_model_type:
seq_logits = seq_logits.contiguous()
shift_labels = resh_case_aug_gene_lm_labels.contiguous()
else:
raise NotImplementedError
case_aug_gene_lm_labels = shift_labels.view(shift_labels.shape[0] // args.n_doc, args.n_doc, -1)
# seq_logprobs: [batch_size, n_doc, tgt_length, #vocab]
seq_logprobs = F.log_softmax(seq_logits, dim=-1).view(seq_logits.shape[0] // args.n_doc, args.n_doc, -1, seq_logits.shape[-1])
# maybe this line of code is not necessary, but it should be ok to keep it
seq_logprobs_backup = copy.deepcopy(seq_logprobs)
## calculate loss
# case_aug_gene_lm_labels: [batch_size, n_doc, tgt_length, 1]
case_aug_gene_lm_labels = case_aug_gene_lm_labels.unsqueeze(-1).to(device2)
# change -100 in case_aug_gene_lm_labels to tokenizer_gene.encode(tokenizer_gene.pad_token)[0]
case_aug_gene_lm_labels_if_pad = case_aug_gene_lm_labels.eq(-100)
# num_not_pad_in_labels: [batch_size, n_doc, 1]
num_not_pad_in_labels = case_aug_gene_lm_labels_if_pad.logical_not().to(torch.float).sum(2)
case_aug_gene_lm_labels.masked_fill_(case_aug_gene_lm_labels_if_pad, tokenizer_gene.encode(tokenizer_gene.pad_token)[0])
# ll: [batch_size, n_doc, tgt_length, 1], some of the logprob are from [PAD] token
ll = seq_logprobs.gather(dim=-1, index=case_aug_gene_lm_labels)
# total sum of all (normalised) logits
smooth_obj = seq_logprobs.sum(dim=-1, keepdim=True)
# do not count the logprob of token whose id equals pad id
# ll: [batch_size, n_doc, tgt_length, 1]
ll.masked_fill_(case_aug_gene_lm_labels_if_pad, 0)
smooth_obj.masked_fill_(case_aug_gene_lm_labels_if_pad, 0)
# p_doc comes into play; marginalize
# ll: [batch_size, n_doc, tgt_length, 1]; ll2: [batch_size, n_doc, 1]
# ll2: (1/n) * \sum_i log P(w_i), i.e. the mean token log-prob per doc
ll2 = ll.sum(2) / num_not_pad_in_labels
assert ll2.size() == num_not_pad_in_labels.size()
# doc_logprobs: [batch_size, n_doc, 1]
ll2 = ll2 + doc_logprobs
smooth_obj = smooth_obj.sum(2)
smooth_obj = smooth_obj + doc_logprobs
# Marginalize over docs
# ll3: [batch_size, 1]
ll3 = ll2.logsumexp(1)
smooth_obj = smooth_obj.logsumexp(1)
nll_loss = -ll3
smooth_loss = -smooth_obj
bool_if_positive = (nll_loss > -0.05)
# Q:
torch.save(simi_prob.cpu(), os.path.join(args.output_dir, 'simi_prob.pt'))
if bool_if_positive.to(torch.float).mean() != 1:
raise Exception("Unexpected nll_loss values below -0.05: {}".format(nll_loss))
# print('nll_loss:', nll_loss)
# print('nll_loss.size():', nll_loss.size())
# Found problem: since some tokens are [PAD], mean() would also average over them; [PAD] tokens should be excluded
nll_loss = nll_loss.mean()
smooth_loss = smooth_loss.mean()
eps_i = epsilon / seq_logprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
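# Label-smoothed loss: loss = (1 - epsilon) * NLL + (epsilon / vocab_size) * smooth_loss,
# i.e. the standard label smoothing formulation, where smooth_loss spreads probability mass uniformly over the vocabulary.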
# seq_logprobs_backup: [batch_size, n_doc, seq_length, len_words]
# calculate accuracy when the dataset is "sentiment sentence classification" dataset
if args.dataset_selection == 4 or args.dataset_selection == 5 or args.dataset_selection == 6 or args.dataset_selection == 7:
# print("seq_logprobs_backup.size(): ", seq_logprobs_backup.size())
cur_batch_size = seq_logprobs_backup.size()[0]
## true label
true_label = []
# case_aug_gene_lm_labels: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
## pred label
cnt_correct = 0
for cur_data_id in range(cur_batch_size):
# assume only one doc is considered, else is not implemented yet
if not case_aug_gene_lm_labels.size()[1] == 1:
print("Current code is only designed for the situation where only n_doc is 1.")
# print("case_aug_gene_lm_labels[cur_data_id, 0, :10]: ", case_aug_gene_lm_labels[cur_data_id, 0, :10])
if "bart" in args.generator_model_type:
# cur_true_label = tokenizer_gene.decode(case_aug_gene_lm_labels[cur_data_id, 0, 0]).strip()
# since we have added a <bos> token
cur_true_label = tokenizer_gene.decode(case_aug_gene_lm_labels[cur_data_id, 0, 1]).strip()
else:
raise NotImplementedError
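# Note: the hard-coded vocabulary ids below (22173, 33407, 12516, 288, 134, 176, 246, 306) appear to be
# the BART BPE ids of the label tokens " positive", " negative", " neutral" and the digits "0"-"4";
# if the tokenizer changes they would need to be re-derived, e.g. via tokenizer_gene.encode(" positive", add_special_tokens=False).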
if "bart" in args.generator_model_type:
prob_positive_pred = torch.exp(seq_logprobs_backup[cur_data_id, 0, 1, 22173])
prob_negative_pred = torch.exp(seq_logprobs_backup[cur_data_id, 0, 1, 33407])
prob_neutral_pred = torch.exp(seq_logprobs_backup[cur_data_id, 0, 1, 12516])
prob_0_pred = torch.exp(seq_logprobs_backup[cur_data_id, 0, 1, 288])
prob_1_pred = torch.exp(seq_logprobs_backup[cur_data_id, 0, 1, 134])
prob_2_pred = torch.exp(seq_logprobs_backup[cur_data_id, 0, 1, 176])
prob_3_pred = torch.exp(seq_logprobs_backup[cur_data_id, 0, 1, 246])
prob_4_pred = torch.exp(seq_logprobs_backup[cur_data_id, 0, 1, 306])
else:
raise NotImplementedError
if args.dataset_selection == 4 or args.dataset_selection == 7:
if not (cur_true_label == 'positive' or cur_true_label == 'negative'):
raise Exception("Not acceptable cur_true_label: {}, it should either be positive or negative.".format(cur_true_label))
if (prob_positive_pred >= prob_negative_pred and cur_true_label == 'positive') or (prob_positive_pred < prob_negative_pred and cur_true_label == 'negative'):
cnt_correct += 1
elif args.dataset_selection == 5:
if not (cur_true_label == 'positive' or cur_true_label == 'negative' or cur_true_label == 'neutral'):
raise Exception("Not acceptable cur_true_label: {}, it should either be positive or negative or neutral.".format(cur_true_label))
if (prob_positive_pred >= prob_negative_pred and prob_positive_pred >= prob_neutral_pred and cur_true_label == 'positive') or (prob_negative_pred > prob_positive_pred and prob_negative_pred > prob_neutral_pred and cur_true_label == 'negative') or (prob_neutral_pred > prob_positive_pred and prob_neutral_pred > prob_negative_pred and cur_true_label == 'neutral'):
cnt_correct += 1
elif args.dataset_selection == 6:
if not (cur_true_label == '0' or cur_true_label == '1' or cur_true_label == '2' or cur_true_label == '3' or cur_true_label == '4'):
raise Exception("Not acceptable cur_true_label: {}, it should be between 0~4.".format(cur_true_label))
if int(cur_true_label) == np.argmax([prob_0_pred.item(), prob_1_pred.item(), prob_2_pred.item(), prob_3_pred.item(), prob_4_pred.item()]):
cnt_correct += 1
batch_accuracy = cnt_correct / cur_batch_size
else:
batch_accuracy = None
return loss, nll_loss, seq_logprobs, doc_logprobs, batch_accuracy
# case_aug_cur_bundle: [case_aug_gene_input_id, case_aug_gene_attention_mask, case_aug_gene_lm_labels, \
# doc_retr_cases_input_ids, doc_retr_cases_attention_mask, doc_retr_cases_segment_ids, \
# input_retr_input_ids, input_retr_attention_mask, input_retr_segment_ids]
# case_aug_gene_input_id: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
# case_aug_gene_attention_mask: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
# case_aug_gene_lm_labels: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
# doc_retr_cases_input_ids: [batch_size, n_doc, cases_per_doc, input_len_retr] (not changed)
# input_retr_input_ids: [batch_size, input_len_retr]
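# batch_step_fast_train_1GPU: faster variant of batch_step that lets the generator compute the NLL itself
# (by passing labels=) and skips the retriever forward pass entirely, so it can run on a single GPU;
# as a consequence the retriever cannot be updated in this mode.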
def batch_step_fast_train_1GPU(args, model_generator, model_retriever, model_retriever_doc, batch, tokenizer_gene, epsilon, if_try_one_gpu=False):
## prepare input data
batch_gene = batch[0:3]
# [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
case_aug_gene_input_id, case_aug_gene_attention_mask, case_aug_gene_lm_labels = batch_gene
batch_size, n_doc, tgt_len_gene = case_aug_gene_input_id.size()[0], case_aug_gene_input_id.size()[1], case_aug_gene_input_id.size()[-1]
# view case_aug_gene to fit the requirement of gpt2's input
resh_case_aug_gene_input_id = case_aug_gene_input_id.view(-1, tgt_len_gene).to(device1)
resh_case_aug_gene_attention_mask = case_aug_gene_attention_mask.view(-1, tgt_len_gene).to(device1)
resh_case_aug_gene_lm_labels = case_aug_gene_lm_labels.view(-1, tgt_len_gene).to(device1)
if "t5" in args.generator_model_type:
decoder_input_ids = model_generator._shift_right(resh_case_aug_gene_lm_labels)
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, decoder_input_ids=decoder_input_ids, labels=resh_case_aug_gene_lm_labels)
elif "bart" in args.generator_model_type:
decoder_input_ids = shift_tokens_right(resh_case_aug_gene_lm_labels, model_generator.config.pad_token_id, model_generator.config.decoder_start_token_id)
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, decoder_input_ids=decoder_input_ids, labels=resh_case_aug_gene_lm_labels)
else:
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, labels=resh_case_aug_gene_lm_labels)
# logits/seq_logits: [batch_size * n_doc, tgt_length, #vocab]
nll_loss, logits = results[0], results[1]
if if_try_one_gpu:
nll_loss_device2 = nll_loss
seq_logits = logits
else:
nll_loss_device2 = nll_loss.to(device2)
# seq_logprobs: [batch_size, n_doc, tgt_length, #vocab]
seq_logits = logits.to(device2)
if args.generator_model_type == "gpt2-lmhead" or "bert" in args.generator_model_type:
# !!! use these three lines to fix the loss calculating bug of GPT2LMHeadModel
seq_logits = seq_logits[..., :-1, :].contiguous()
shift_labels = resh_case_aug_gene_lm_labels[..., 1:].contiguous()
elif "bart" in args.generator_model_type or "t5" in args.generator_model_type:
seq_logits = seq_logits.contiguous()
shift_labels = resh_case_aug_gene_lm_labels.contiguous()
else:
raise NotImplementedError
## get case_aug_gene_lm_labels_if_pad
case_aug_gene_lm_labels = shift_labels.view(shift_labels.shape[0] // args.n_doc, args.n_doc, -1)
if if_try_one_gpu:
case_aug_gene_lm_labels = case_aug_gene_lm_labels.unsqueeze(-1)
else:
# case_aug_gene_lm_labels: [batch_size, n_doc, tgt_length, 1]
case_aug_gene_lm_labels = case_aug_gene_lm_labels.unsqueeze(-1).to(device2)
# change -100 in case_aug_gene_lm_labels to tokenizer_gene.encode(tokenizer_gene.pad_token)[0]
case_aug_gene_lm_labels_if_pad = case_aug_gene_lm_labels.eq(-100)
## get smooth_loss
seq_logprobs = F.log_softmax(seq_logits, dim=-1).view(seq_logits.shape[0] // args.n_doc, args.n_doc, -1, seq_logits.shape[-1])
smooth_obj = seq_logprobs.sum(dim=-1, keepdim=True)
smooth_obj.masked_fill_(case_aug_gene_lm_labels_if_pad, 0)
smooth_obj = smooth_obj.sum(2)
# smooth_obj = smooth_obj + doc_logprobs
smooth_obj = smooth_obj.logsumexp(1)
smooth_loss = -smooth_obj
smooth_loss = smooth_loss.mean()
## get total loss
eps_i = epsilon / seq_logprobs.size(-1)
loss = (1.0 - epsilon) * nll_loss_device2 + eps_i * smooth_loss
# seq_logprobs: [batch_size, n_doc, seq_length, len_words]
# calculate accuracy when the dataset is "sentiment sentence classification" dataset
if args.dataset_selection == 4 or args.dataset_selection == 5 or args.dataset_selection == 6 or args.dataset_selection == 7:
# print("seq_logprobs.size(): ", seq_logprobs.size())
cur_batch_size = seq_logprobs.size()[0]
## true label
true_label = []
# case_aug_gene_lm_labels: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
## pred label
cnt_correct = 0
for cur_data_id in range(cur_batch_size):
# assume only one doc is considered, else is not implemented yet
if not case_aug_gene_lm_labels.size()[1] == 1:
print("Current code is only designed for the situation where only n_doc is 1.")
# print("case_aug_gene_lm_labels[cur_data_id, 0, :10]: ", case_aug_gene_lm_labels[cur_data_id, 0, :10])
if "bart" in args.generator_model_type:
# cur_true_label = tokenizer_gene.decode(case_aug_gene_lm_labels[cur_data_id, 0, 0]).strip()
# since we have added a <bos> token
cur_true_label = tokenizer_gene.decode(case_aug_gene_lm_labels[cur_data_id, 0, 1]).strip()
else:
raise NotImplementedError
if "bart" in args.generator_model_type:
prob_positive_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 22173])
prob_negative_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 33407])
prob_neutral_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 12516])
prob_0_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 288])
prob_1_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 134])
prob_2_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 176])
prob_3_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 246])
prob_4_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 306])
else:
raise NotImplementedError
if args.dataset_selection == 4 or args.dataset_selection == 7:
if not (cur_true_label == 'positive' or cur_true_label == 'negative'):
raise Exception("Not acceptable cur_true_label: {}, it should either be positive or negative.".format(cur_true_label))
if (prob_positive_pred >= prob_negative_pred and cur_true_label == 'positive') or (prob_positive_pred < prob_negative_pred and cur_true_label == 'negative'):
cnt_correct += 1
elif args.dataset_selection == 5:
if not (cur_true_label == 'positive' or cur_true_label == 'negative' or cur_true_label == 'neutral'):
raise Exception("Not acceptable cur_true_label: {}, it should either be positive or negative or neutral.".format(cur_true_label))
if (prob_positive_pred >= prob_negative_pred and prob_positive_pred >= prob_neutral_pred and cur_true_label == 'positive') or (prob_negative_pred > prob_positive_pred and prob_negative_pred > prob_neutral_pred and cur_true_label == 'negative') or (prob_neutral_pred > prob_positive_pred and prob_neutral_pred > prob_negative_pred and cur_true_label == 'neutral'):
cnt_correct += 1
elif args.dataset_selection == 6:
if not (cur_true_label == '0' or cur_true_label == '1' or cur_true_label == '2' or cur_true_label == '3' or cur_true_label == '4'):
raise Exception("Not acceptable cur_true_label: {}, it should be between 0~4.".format(cur_true_label))
if int(cur_true_label) == np.argmax([prob_0_pred.item(), prob_1_pred.item(), prob_2_pred.item(), prob_3_pred.item(), prob_4_pred.item()]):
cnt_correct += 1
batch_accuracy = cnt_correct / cur_batch_size
else:
batch_accuracy = None
return loss, nll_loss_device2, seq_logprobs, batch_accuracy
# batch: [case_aug_gene_input_id, case_aug_gene_attention_mask, case_aug_gene_lm_labels]
# case_aug_gene_input_id: [batch_size, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
# case_aug_gene_attention_mask: [batch_size, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
# case_aug_gene_lm_labels: [batch_size, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
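# batch_step_eval_analysis: evaluation step that, besides the generator loss and logits, also returns
# the retriever embeddings of the retrieved cases and of the current query, so they can be saved for
# offline analysis (see if_eval_analysis in evaluate()).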
def batch_step_eval_analysis(args, model_generator, model_retriever, model_retriever_doc, batch, tokenizer_gene):
## prepare input data
batch_gene = tuple(t.to(device1) for t in batch[0:3])
batch_retr_cases = batch[3:6]
batch_retr_cur_input = tuple(t.to(device2) for t in batch[6:9])
### Generator ###
# Q: originally here use [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
# [batch_size, n_doc, cases_per_doc * input_doc_len_gene + input_cur_len_gene]
case_aug_gene_input_id, case_aug_gene_attention_mask, case_aug_gene_lm_labels = batch_gene
batch_size, n_doc, tgt_len_gene = case_aug_gene_input_id.size()[0], case_aug_gene_input_id.size()[1], case_aug_gene_input_id.size()[-1]
# print('n_doc:', n_doc)
assert n_doc == 1
# view case_aug_gene to fit the requirement of gpt2's input
resh_case_aug_gene_input_id = case_aug_gene_input_id.view(-1, tgt_len_gene)
resh_case_aug_gene_attention_mask = case_aug_gene_attention_mask.view(-1, tgt_len_gene)
resh_case_aug_gene_lm_labels = case_aug_gene_lm_labels.view(-1, tgt_len_gene)
## model_generator
if "gpt2" in args.generator_model_type or "bert" in args.generator_model_type:
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, labels=resh_case_aug_gene_lm_labels)
elif "bart" in args.generator_model_type or "t5" in args.generator_model_type:
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, labels=resh_case_aug_gene_lm_labels)
else:
raise NotImplementedError
# logits/seq_logits: [batch_size * n_doc, tgt_length, #vocab]
loss, logits = results[0], results[1]
### Retriever_doc ###
# [batch_size, n_doc, cases_per_doc, input_len_retr]
## batch_retr_cases
doc_retr_cases_input_ids, doc_retr_cases_attention_mask, doc_retr_cases_segment_ids = batch_retr_cases
cases_per_doc, input_len_retr = doc_retr_cases_input_ids.size()[2], doc_retr_cases_input_ids.size()[3]
# view retr_cases to fit the requirement of bert's input
resh_doc_retr_cases_input_ids = doc_retr_cases_input_ids.view(-1, input_len_retr).to(device2)
resh_doc_retr_cases_attention_mask = doc_retr_cases_attention_mask.view(-1, input_len_retr).to(device2)
resh_doc_retr_cases_segment_ids = doc_retr_cases_segment_ids.view(-1, input_len_retr).to(device2)
## batch_retr_cases
outputs_retr_cases = model_retriever_doc(resh_doc_retr_cases_input_ids, attention_mask=resh_doc_retr_cases_attention_mask, token_type_ids=resh_doc_retr_cases_segment_ids)
# pooled_embedding_retr_cases: [48, 768], verified
pooled_embedding_retr_cases = outputs_retr_cases[0]
# print('pooled_embedding_retr_cases.size(): ', pooled_embedding_retr_cases.size())
# pooled_embedding_retr_cases: [batch_size, n_doc, cases_per_doc, 768]
pooled_embedding_retr_cases = pooled_embedding_retr_cases.view(batch_size, n_doc, cases_per_doc, -1)
# print('pooled_embedding_retr_cases.size(): ', pooled_embedding_retr_cases.size())
### Retriever ###
# [batch_size, input_len_retr]
input_retr_input_ids, input_retr_attention_mask, input_retr_segment_ids = batch_retr_cur_input
## batch_retr_cur_input
outputs_cur_batch = model_retriever(input_retr_input_ids, attention_mask=input_retr_attention_mask, token_type_ids=input_retr_segment_ids)
# pooled_embedding_cur_batch: [batch_size, 768]
pooled_embedding_cur_batch = outputs_cur_batch[0]
# pooled_embedding_cur_batch: [4, 768], verified
# print('pooled_embedding_cur_batch.size(): ', pooled_embedding_cur_batch.size())
# pooled_embedding_cur_batch: [batch_size, 1, 1, 768]
pooled_embedding_cur_batch = pooled_embedding_cur_batch.unsqueeze(1).unsqueeze(2)
# print('pooled_embedding_cur_batch.size(): ', pooled_embedding_cur_batch.size())
### (start) This block of code is only used for sentiment sentence classification dataset
seq_logits = logits
if args.generator_model_type == "gpt2-lmhead" or "bert" in args.generator_model_type:
# !!! use these three lines to fix the loss calculating bug of GPT2LMHeadModel
seq_logits = seq_logits[..., :-1, :].contiguous()
shift_labels = resh_case_aug_gene_lm_labels[..., 1:].contiguous()
elif "bart" in args.generator_model_type or "t5" in args.generator_model_type:
seq_logits = seq_logits.contiguous()
shift_labels = resh_case_aug_gene_lm_labels.contiguous()
seq_logprobs = F.log_softmax(seq_logits, dim=-1).view(seq_logits.shape[0] // args.n_doc, args.n_doc, -1, seq_logits.shape[-1])
# seq_logprobs: [batch_size, n_doc, seq_length, len_words]
# calculate accuracy when the dataset is "sentiment sentence classification" dataset
if args.dataset_selection == 4 or args.dataset_selection == 5 or args.dataset_selection == 6 or args.dataset_selection == 7:
# print("seq_logprobs.size(): ", seq_logprobs.size())
cur_batch_size = seq_logprobs.size()[0]
## true label
true_label = []
# case_aug_gene_lm_labels: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
## pred label
cnt_correct = 0
for cur_data_id in range(cur_batch_size):
# assume only one doc is considered, else is not implemented yet
if not case_aug_gene_lm_labels.size()[1] == 1:
print("Current code is only designed for the situation where only n_doc is 1.")
# print("case_aug_gene_lm_labels[cur_data_id, 0, :10]: ", case_aug_gene_lm_labels[cur_data_id, 0, :10])
if "bart" in args.generator_model_type:
# cur_true_label = tokenizer_gene.decode(case_aug_gene_lm_labels[cur_data_id, 0, 0]).strip()
# since we have added a <bos> token
cur_true_label = tokenizer_gene.decode(case_aug_gene_lm_labels[cur_data_id, 0, 1]).strip()
else:
raise NotImplementedError
if "bart" in args.generator_model_type:
prob_positive_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 22173])
prob_negative_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 33407])
prob_neutral_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 12516])
prob_0_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 288])
prob_1_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 134])
prob_2_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 176])
prob_3_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 246])
prob_4_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 306])
else:
raise NotImplementedError
if args.dataset_selection == 4 or args.dataset_selection == 7:
if not (cur_true_label == 'positive' or cur_true_label == 'negative'):
raise Exception("Not acceptable cur_true_label: {}, it should either be positive or negative.".format(cur_true_label))
if (prob_positive_pred >= prob_negative_pred and cur_true_label == 'positive') or (prob_positive_pred < prob_negative_pred and cur_true_label == 'negative'):
cnt_correct += 1
elif args.dataset_selection == 5:
if not (cur_true_label == 'positive' or cur_true_label == 'negative' or cur_true_label == 'neutral'):
raise Exception("Not acceptable cur_true_label: {}, it should either be positive or negative or neutral.".format(cur_true_label))
if (prob_positive_pred >= prob_negative_pred and prob_positive_pred >= prob_neutral_pred and cur_true_label == 'positive') or (prob_negative_pred > prob_positive_pred and prob_negative_pred > prob_neutral_pred and cur_true_label == 'negative') or (prob_neutral_pred > prob_positive_pred and prob_neutral_pred > prob_negative_pred and cur_true_label == 'neutral'):
cnt_correct += 1
elif args.dataset_selection == 6:
if not (cur_true_label == '0' or cur_true_label == '1' or cur_true_label == '2' or cur_true_label == '3' or cur_true_label == '4'):
raise Exception("Not acceptable cur_true_label: {}, it should be between 0~4.".format(cur_true_label))
if int(cur_true_label) == np.argmax([prob_0_pred.item(), prob_1_pred.item(), prob_2_pred.item(), prob_3_pred.item(), prob_4_pred.item()]):
cnt_correct += 1
batch_accuracy = cnt_correct / cur_batch_size
else:
batch_accuracy = None
### (end) This block of code is only used for sentiment sentence classification dataset
return loss, logits, pooled_embedding_retr_cases, pooled_embedding_cur_batch, batch_accuracy
# batch: [case_aug_gene_input_id, case_aug_gene_attention_mask, case_aug_gene_lm_labels]
# case_aug_gene_input_id: [batch_size, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
# case_aug_gene_attention_mask: [batch_size, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
# case_aug_gene_lm_labels: [batch_size, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
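# batch_step_eval: plain evaluation step that only runs the generator (no retriever forward pass); it expects n_doc == 1.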
def batch_step_eval(args, model_generator, batch, tokenizer_gene):
## prepare input data
batch_gene = tuple(t.to(device1) for t in batch)
# Q: originally here use [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
# [batch_size, n_doc, cases_per_doc * input_doc_len_gene + input_cur_len_gene]
case_aug_gene_input_id, case_aug_gene_attention_mask, case_aug_gene_lm_labels = batch_gene
batch_size, n_doc, tgt_len_gene = case_aug_gene_input_id.size()[0], case_aug_gene_input_id.size()[1], case_aug_gene_input_id.size()[-1]
assert n_doc == 1
# view case_aug_gene to fit the requirement of gpt2's input
resh_case_aug_gene_input_id = case_aug_gene_input_id.view(-1, tgt_len_gene)
resh_case_aug_gene_attention_mask = case_aug_gene_attention_mask.view(-1, tgt_len_gene)
resh_case_aug_gene_lm_labels = case_aug_gene_lm_labels.view(-1, tgt_len_gene)
## model_generators
# original code, which should use resh_case_aug_gene_lm_labels but not case_aug_gene_lm_labels
# results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, labels=case_aug_gene_lm_labels)
if "gpt2" in args.generator_model_type or "bert" in args.generator_model_type:
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, labels=resh_case_aug_gene_lm_labels)
elif "bart" in args.generator_model_type or "t5" in args.generator_model_type:
results = model_generator(resh_case_aug_gene_input_id, attention_mask=resh_case_aug_gene_attention_mask, labels=resh_case_aug_gene_lm_labels)
else:
raise NotImplementedError
# logits/seq_logits: [batch_size * n_doc, tgt_length, #vocab]
loss, logits = results[0], results[1]
# print('loss: ', loss)
### (start) This block of code is only used for sentiment sentence classification dataset
seq_logits = logits
if args.generator_model_type == "gpt2-lmhead" or "bert" in args.generator_model_type:
# !!! use these three lines to fix the loss calculating bug of GPT2LMHeadModel
seq_logits = seq_logits[..., :-1, :].contiguous()
shift_labels = resh_case_aug_gene_lm_labels[..., 1:].contiguous()
elif "bart" in args.generator_model_type or "t5" in args.generator_model_type:
seq_logits = seq_logits.contiguous()
shift_labels = resh_case_aug_gene_lm_labels.contiguous()
else:
raise NotImplementedError
seq_logprobs = F.log_softmax(seq_logits, dim=-1).view(seq_logits.shape[0] // args.n_doc, args.n_doc, -1, seq_logits.shape[-1])
# seq_logprobs: [batch_size, n_doc, seq_length, len_words]
# calculate accuracy when the dataset is "sentiment sentence classification" dataset
if args.dataset_selection == 4 or args.dataset_selection == 5 or args.dataset_selection == 6 or args.dataset_selection == 7:
# print("seq_logprobs.size(): ", seq_logprobs.size())
cur_batch_size = seq_logprobs.size()[0]
## true label
true_label = []
# case_aug_gene_lm_labels: [batch_size, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene]
## pred label
cnt_correct = 0
for cur_data_id in range(cur_batch_size):
# assume only one doc is considered, else is not implemented yet
if not case_aug_gene_lm_labels.size()[1] == 1:
print("Current code is only designed for the situation where only n_doc is 1.")
# print("case_aug_gene_lm_labels[cur_data_id, 0, :10]: ", case_aug_gene_lm_labels[cur_data_id, 0, :10])
if "bart" in args.generator_model_type:
# cur_true_label = tokenizer_gene.decode(case_aug_gene_lm_labels[cur_data_id, 0, 0]).strip()
# since we have added a <bos> token
cur_true_label = tokenizer_gene.decode(case_aug_gene_lm_labels[cur_data_id, 0, 1]).strip()
else:
raise NotImplementedError
if "bart" in args.generator_model_type:
prob_positive_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 22173])
prob_negative_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 33407])
prob_neutral_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 12516])
prob_0_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 288])
prob_1_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 134])
prob_2_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 176])
prob_3_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 246])
prob_4_pred = torch.exp(seq_logprobs[cur_data_id, 0, 1, 306])
else:
raise NotImplementedError
if args.dataset_selection == 4 or args.dataset_selection == 7:
if not (cur_true_label == 'positive' or cur_true_label == 'negative'):
raise Exception("Not acceptable cur_true_label: {}, it should either be positive or negative.".format(cur_true_label))
if (prob_positive_pred >= prob_negative_pred and cur_true_label == 'positive') or (prob_positive_pred < prob_negative_pred and cur_true_label == 'negative'):
cnt_correct += 1
elif args.dataset_selection == 5:
if not (cur_true_label == 'positive' or cur_true_label == 'negative' or cur_true_label == 'neutral'):
raise Exception("Not acceptable cur_true_label: {}, it should either be positive or negative or neutral.".format(cur_true_label))
if (prob_positive_pred >= prob_negative_pred and prob_positive_pred >= prob_neutral_pred and cur_true_label == 'positive') or (prob_negative_pred > prob_positive_pred and prob_negative_pred > prob_neutral_pred and cur_true_label == 'negative') or (prob_neutral_pred > prob_positive_pred and prob_neutral_pred > prob_negative_pred and cur_true_label == 'neutral'):
cnt_correct += 1
elif args.dataset_selection == 6:
if not (cur_true_label == '0' or cur_true_label == '1' or cur_true_label == '2' or cur_true_label == '3' or cur_true_label == '4'):
raise Exception("Not acceptable cur_true_label: {}, it should be between 0~4.".format(cur_true_label))
if int(cur_true_label) == np.argmax([prob_0_pred.item(), prob_1_pred.item(), prob_2_pred.item(), prob_3_pred.item(), prob_4_pred.item()]):
cnt_correct += 1
batch_accuracy = cnt_correct / cur_batch_size
else:
batch_accuracy = None
### (end) This block of code is only used for sentiment sentence classification dataset
return loss, logits, batch_accuracy
# added max_additional_cases
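# evaluate: runs evaluation/testing bundle by bundle, coordinating with an external retriever process
# through files on disk: it saves signal files, the current retriever weights and the upcoming bundle,
# then waits for the retrieved encoded cases before concatenating them with the current bundle and
# scoring it with the generator.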
def evaluate(args, model_generator, model_retriever, model_retriever_doc, tokenizer_gene, \
dataloader_in_bundle_eval_or_test, path_next_bundle_eval_or_test, path_retriever_eval_or_test, \
path_retriever_doc_eval_or_test, path_retrieved_encoded_cases_eval_or_test, data_type, path_cnt_saved_bundle, path_cnt_retrieved_bundle):
# send a signal to retriever
if data_type == 'eval':
print('INFO: begin evaluating...')
## send signal to retriever
path_signal_file_if_eval_or_test = os.path.join(args.output_dir, 'under_evaluation.pt')
torch.save(torch.ones(1), path_signal_file_if_eval_or_test)
if_eval_analysis = False
id_cnt_bundle = 1
elif data_type == 'test':
print('INFO: begin testing...')
path_signal_file_if_eval_or_test = os.path.join(args.output_dir, 'under_evaluation_test.pt')
torch.save(torch.ones(1), path_signal_file_if_eval_or_test)
if_eval_analysis = args.if_eval_analysis
id_cnt_bundle = 2
else:
raise Exception('Wrong data type! data_type: ', data_type)
# only save retriever once
if not (args.if_fast_train and args.if_comet_baseline):
while os.path.exists(path_retriever_eval_or_test) or os.path.exists(path_retriever_doc_eval_or_test):
print('Warning: path_retriever_eval_or_test or path_retriever_doc_eval_or_test still exists!')
time.sleep(5)
torch.save(model_retriever.state_dict(), path_retriever_eval_or_test)
torch.save(model_retriever_doc.state_dict(), path_retriever_doc_eval_or_test)
eval_loss = 0
nb_eval_steps = 0
num_displays = 1
if args.dataset_selection == 4 or args.dataset_selection == 5 or args.dataset_selection == 6 or args.dataset_selection == 7:
eval_ttl_cnt_correct = 0
if if_eval_analysis:
# Embed_docs, Embed_cur_query = None, None
Loss_eval = []
# display_batch_indices
display_batch_indices = list(range(len(dataloader_in_bundle_eval_or_test)))
random.shuffle(display_batch_indices)
display_batch_indices = display_batch_indices[:num_displays]
# eos_token, path_next_bundle_eval_or_test, num_bundles, itr_eval_bundleloader
if args.generator_model_type == "gpt2-lmhead" or "t5" in args.generator_model_type:
eos_token = tokenizer_gene.encode(tokenizer_gene.eos_token)[0]
elif args.generator_model_type == "bart-base" or args.generator_model_type == "bart-large" or 'bert' in args.generator_model_type:
eos_token = tokenizer_gene.encode(tokenizer_gene.eos_token)[1]
else:
raise NotImplementedError
retrieved_cases_cur_bundle = None
print("\n\nsome examples")
num_bundles = len(dataloader_in_bundle_eval_or_test)
print('num_bundles:', num_bundles)
itr_eval_bundleloader = iter(dataloader_in_bundle_eval_or_test)
for id_bundle in range(num_bundles):
# FQ: add 'or id_bundle == 0', in case num_bundles == 1
if id_bundle < num_bundles - 1 or id_bundle == 0:
next_bundle = next(itr_eval_bundleloader)
# about next_bundle
if data_type == 'eval':
path_prev_next_bundle, path_cur_next_bundle, cnt_bundle = get_path_cur_next_bundle(args, \
path_cnt_saved_bundle, data_type, path_next_bundle_eval=path_next_bundle_eval_or_test)
elif data_type == 'test':
path_prev_next_bundle, path_cur_next_bundle, cnt_bundle = get_path_cur_next_bundle(args, \
path_cnt_saved_bundle, data_type, path_next_bundle_test=path_next_bundle_eval_or_test)
if not (args.if_fast_train and args.if_comet_baseline):
# not saving retriever
while os.path.exists(path_prev_next_bundle):
time.sleep(5)
try:
torch.save(next_bundle, path_cur_next_bundle)
except:
time.sleep(5)
print("Exception occurs when saving next_bundle")
torch.save(next_bundle, path_cur_next_bundle)
cnt_bundle[id_cnt_bundle] += 1
torch.save(cnt_bundle, path_cnt_saved_bundle)
if id_bundle == 0:
cur_bundle = next_bundle
# no need to re-embed the train cases
# retrieved_cases_cur_bundle = wait_get_remove_cases_for_bundle(path_retrieved_encoded_cases_eval_or_test)
if data_type == 'eval':
## only when if_fast_train and if_comet_baseline, we do not need to wait for retrieved_cases_cur_bundle
if not (args.if_fast_train and args.if_comet_baseline):
retrieved_cases_cur_bundle = wait_get_remove_cases_for_bundle_while_deleting_bad_cases_file(args, \
path_cnt_retrieved_bundle, data_type, path_retrieved_encoded_cases_eval=path_retrieved_encoded_cases_eval_or_test)
elif data_type == 'test':
## only when if_fast_train and if_comet_baseline, we do not need to wait for retrieved_cases_cur_bundle
if not (args.if_fast_train and args.if_comet_baseline):
retrieved_cases_cur_bundle = wait_get_remove_cases_for_bundle_while_deleting_bad_cases_file(args, \
path_cnt_retrieved_bundle, data_type, path_retrieved_encoded_cases_test=path_retrieved_encoded_cases_eval_or_test)
# FQ: add 'and num_bundles > 1', in case num_bundles == 1
if id_bundle == 0 and num_bundles > 1:
next_bundle = next(itr_eval_bundleloader)
# about next_bundle
if data_type == 'eval':
path_prev_next_bundle, path_cur_next_bundle, cnt_bundle = get_path_cur_next_bundle(args, \
path_cnt_saved_bundle, data_type, path_next_bundle_eval=path_next_bundle_eval_or_test)
elif data_type == 'test':
path_prev_next_bundle, path_cur_next_bundle, cnt_bundle = get_path_cur_next_bundle(args, \
path_cnt_saved_bundle, data_type, path_next_bundle_test=path_next_bundle_eval_or_test)
while os.path.exists(path_prev_next_bundle):
time.sleep(5)
try:
torch.save(next_bundle, path_cur_next_bundle)
except:
time.sleep(5)
print("Exception occurs when saving next_bundle")
torch.save(next_bundle, path_cur_next_bundle)
cnt_bundle[id_cnt_bundle] += 1
torch.save(cnt_bundle, path_cnt_saved_bundle)
if "bart" in args.generator_model_type or "gpt2" in args.generator_model_type or "bert" in args.generator_model_type or "t5" in args.generator_model_type:
if args.if_fast_train:
case_aug_cur_bundle = concat_cur_bundle_and_encoded_cases_EOSfixed_Bart_randomly_mask_demonstrations(args, cur_bundle, retrieved_cases_cur_bundle, tokenizer_gene, data_type)
else:
case_aug_cur_bundle = concat_cur_bundle_and_encoded_cases_EOSfixed_Bart(args, cur_bundle, retrieved_cases_cur_bundle, tokenizer_gene)
else:
raise Exception("Not supported generator_model_type: ", args.generator_model_type)
if if_eval_analysis == False:
# case_aug_cur_bundle only needs to contain the input for model_generator
case_aug_cur_bundle = case_aug_cur_bundle[0:3]
# get dataloader_in_batch for current bundle
data_in_batch = TensorDataset(*case_aug_cur_bundle)
sampler_in_batch = SequentialSampler(data_in_batch)
if data_type == 'eval':
dataloader_in_batch = DataLoader(data_in_batch, sampler=sampler_in_batch, batch_size=args.dev_batch_size)
elif data_type == 'test':
dataloader_in_batch = DataLoader(data_in_batch, sampler=sampler_in_batch, batch_size=args.test_batch_size)
for step, batch in enumerate(dataloader_in_batch):
batch_size = batch[0].size()[0]
# print('batch_size:', batch_size)
# input_ids: [len_bundle, n_doc, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
input_ids, attention_mask, lm_labels = batch
# input_ids = batch[0]
with torch.no_grad():
# batch: [case_aug_gene_input_id, case_aug_gene_attention_mask, case_aug_gene_lm_labels]
# case_aug_gene_input_id: [batch_size, cases_per_doc * input_len_gene + 1 + input_len_gene + 1]
if if_eval_analysis:
loss, logits, tmp_embed_docs, tmp_embed_cur_query, batch_accuracy = batch_step_eval_analysis(args, model_generator, model_retriever, model_retriever_doc, batch, tokenizer_gene)
if id_bundle == 0 and step == 0:
Embed_docs = tmp_embed_docs
Embed_cur_query = tmp_embed_cur_query
Loss_eval.append(loss.item())
else:
Embed_docs = torch.cat((Embed_docs, tmp_embed_docs), dim=0)
Embed_cur_query = torch.cat((Embed_cur_query, tmp_embed_cur_query), dim=0)
Loss_eval.append(loss.item())
# print("step:{}, Embed_docs.size():{}".format(step, Embed_docs.size()))
else:
loss, logits, batch_accuracy = batch_step_eval(args, model_generator, batch, tokenizer_gene)
# print('eval_loss:', eval_loss, 'loss:', loss, 'batch_size:', batch_size, 'input_ids.size():', input_ids.size())
eval_loss += loss * batch_size
# eval_loss += loss
nb_eval_steps += batch_size
if args.dataset_selection == 4 or args.dataset_selection == 5 or args.dataset_selection == 6 or args.dataset_selection == 7:
eval_ttl_cnt_correct += batch_accuracy * batch_size
# print('batch_size:{}, loss:{}, eval_loss:{}, nb_eval_steps:{}'.format(batch_size, loss, eval_loss, nb_eval_steps))
# print some examples
if step in display_batch_indices:
value, indices = logits.max(dim=-1)
sample_index = random.randint(0, batch_size - 1)
print("input_ids:", tokenizer_gene.decode(input_ids[sample_index][0].tolist()))
# print("attention_mask:", attention_mask[sample_index][0].tolist())
# tmp_lm_labels = lm_labels[sample_index][0].tolist()
# tmp_lm_labels = [1 if tmp_lm_labels[i] == -100 else tmp_lm_labels[i] for i in range(len(tmp_lm_labels))]
# print("lm_labels:", tokenizer_gene.decode(tmp_lm_labels))
# IMPORTANT: add max_additional_cases
if "gpt2" in args.generator_model_type:
output = indices[sample_index].tolist()[-args.max_e2:]
output = tokenizer_gene.decode(output)
elif "bart" in args.generator_model_type or "bert" in args.generator_model_type or "t5" in args.generator_model_type:
output = indices[sample_index].tolist()
try:
eos_pos = output.index(eos_token)
output = tokenizer_gene.decode(output[:eos_pos])
except:
output = tokenizer_gene.decode(output)
else:
raise NotImplementedError
if step == 150:
# to check whether max_e2+1 or max_e2 should be used
print("output = indices[sample_index].tolist()[-args.max_e2:]")
print("output ids:", output)
print("output:", output)
cur_bundle = next_bundle
eval_loss = eval_loss / nb_eval_steps
if args.dataset_selection == 4 or args.dataset_selection == 5 or args.dataset_selection == 6 or args.dataset_selection == 7:
eval_accuracy = eval_ttl_cnt_correct / nb_eval_steps
print('eval_loss:{}, {}_accuracy: {}, nb_eval_steps:{}'.format(eval_loss, data_type, eval_accuracy, nb_eval_steps))
else:
eval_accuracy = None
print('eval_loss:{}, nb_eval_steps:{}'.format(eval_loss, nb_eval_steps))
## send signal to retriever
os.remove(path_signal_file_if_eval_or_test)
assert not os.path.exists(path_signal_file_if_eval_or_test)
if if_eval_analysis:
torch.save(Embed_docs.to('cpu'), os.path.join(args.output_dir, 'Embed_docs.pt'))
torch.save(Embed_cur_query.to('cpu'), os.path.join(args.output_dir, 'Embed_cur_query.pt'))
torch.save(Loss_eval, os.path.join(args.output_dir, 'Loss_eval.pt'))
return eval_loss.item(), eval_accuracy
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--generator_model_type", type=str, default="gpt2-lmhead",
help="model type: bart-base/t5-base/gpt2-lmhead/...")
parser.add_argument("--retriever_model_type", type=str, default="dpr",
help="model type: dpr/bert/...")
parser.add_argument("--toy", action="store_true", help="test code")
parser.add_argument("--do_train", action="store_true", help="do training")
parser.add_argument("--do_test", action="store_true", help="do testing")
parser.add_argument("--do_eval", action="store_true", help="do evaluation in the end")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--dataStore_dir", default="/export/home/zonglin001/", type=str, required=False, help="The home directory of zonglin.")
parser.add_argument("--train_dataset", type=str, nargs="+", default=["./Data/conceptnet/train100k_CN_sorted.txt"])
# parser.add_argument("--eval_dataset", type=str, nargs="+", default=["data/conceptnet/dev1_CN.txt", "data/conceptnet/dev2_CN.txt"])
parser.add_argument("--eval_dataset", type=str, nargs="+", default=["./Data/conceptnet/dev1_CN_sorted.txt"])
parser.add_argument("--test_dataset", type=str, nargs="+", default=["./Data/conceptnet/test_CN_sorted.txt"])
parser.add_argument("--add_prefix", action="store_true",
help="add a prefix at the beginning of each input when train with multiple dataset")
# parser.add_argument("--add_separator", action="store_true", help="add <sep> between sub/rel/obj")
parser.add_argument("--predict_part", type=str, default="obj", choices=["sub", "rel", "obj", "all"],
help="predict which part of the triples")
# newly added in 8/21/2021; to calculate the proper max_additional_cases
parser.add_argument("--num_cases_per_query", type=int, default=3)
parser.add_argument("--max_additional_cases", type=int, default=150)
parser.add_argument("--max_e1", type=int, default=24)
parser.add_argument("--max_r", type=int, default=10)
parser.add_argument("--max_e2", type=int, default=36)
parser.add_argument("--seed", type=int, default=123)
parser.add_argument("--no_pretrain", action="store_true", help="w/o pretrained parameters initialized")
parser.add_argument("--train_batch_size", type=int, default=16)
parser.add_argument("--dev_batch_size", type=int, default=8)
parser.add_argument("--test_batch_size", type=int, default=1)
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--logging_steps', type=int, default=150)
parser.add_argument("--eval_per_steps", type=int, default=5000)
# change from 16 to 20: 8/23/2021 9:46 p.m.
parser.add_argument("--num_train_epochs", type=int, default=20)
parser.add_argument("--max_grad_norm", type=int, default=1)
parser.add_argument("--learning_rate", type=float, default=1e-5)
parser.add_argument("--warmup_proportion", type=float, default=0.002)
parser.add_argument("--lr_schedule", type=str, default="warmup_linear")
parser.add_argument("--weight_decay", type=float, default=0.0)
parser.add_argument("--adam_epsilon", type=float, default=1e-8)
# added
parser.add_argument("--root_data_dir", type=str, default="./Data/conceptnet/", help="data dir for current dataset; currently used for subset_selection")
# parser.add_argument("--train_cases_dir", type=str, nargs="+", default=["/home/zy223/CBR/pytorch-transformers-comet/examples/conceptnet_cases/train_cases.txt"])
# parser.add_argument("--val_cases_dir", type=str, nargs="+", default=["/home/zy223/CBR/pytorch-transformers-comet/examples/conceptnet_cases/val_cases.txt"])
# parser.add_argument("--test_cases_dir", type=str, nargs="+", default=["/home/zy223/CBR/pytorch-transformers-comet/examples/conceptnet_cases/test_cases.txt"])
parser.add_argument("--if_without_case", action="store_true", help="Filter all cases as '', to compare the effect of cases")
# dataset_selection: 0: conceptnet 1: atomic 2: Shakespeare text style transfer
# 3: e2e (table2text) 4: sentiment sentence classification dataset; 5: financial phase bank dataset; 6: yelp review; 7: twitter review
parser.add_argument("--dataset_selection", type=int, default=0)
parser.add_argument("--n_doc", type=int, default=3)
parser.add_argument("--num_btch_in_bundle", type=int, default=500)
parser.add_argument("--smooth_score", type=float, default=0.05)
parser.add_argument("--if_froze_both_retriever", action="store_true", help="if the lr for retriever doc and retriever is 0")
parser.add_argument("--if_only_froze_doc_retriever", action="store_true", help="if the lr for retriever doc is 0")
parser.add_argument("--if_comet_baseline", action="store_true", help="comet experiment")
parser.add_argument("--if_only_use_retrieved_target", action="store_true", help="if only use retrieved target during generation (not using retrieved source)")
parser.add_argument("--if_only_use_relation_and_retrieved_target", action="store_true", help="if only use relation and retrieved target during generation (not using retrieved source)")
parser.add_argument("--rand_simi_score", action="store_true", help="if using random simi_score")
parser.add_argument("--use_obj_for_retrieval", action="store_true", help="if using obj for retrieval (get embedding and similarity score)")
parser.add_argument("--use_only_sub_rel_for_retrieval", action="store_true", help="if only using sub for retrieval (get embedding and similarity score)")
parser.add_argument("--if_with_strt_mid_promp", action="store_true", help="if use 'Here are some similar cases to infer from: ' and 'Now you can infer: '")
parser.add_argument("--if_use_relation_for_shakes", action="store_true", help="Whether use relation for shakes dataset (Shakespeare's style is)")
parser.add_argument("--use_special_tokens_to_split_retrieved_cases", action="store_true", help="<split_cases> and <split_source/target>")
parser.add_argument("--if_eval_analysis", action="store_true", help="whether to generate embedding for analysis during test time")
# subset_selection: 0~6, -1 means not using subset
parser.add_argument("--subset_selection", type=int, default=-1)
parser.add_argument("--patience", type=int, default=10, help='for early stopping')
parser.add_argument("--if_not_adding_special_relation_tokens", action="store_true", help="not adding <oReact> for instance")
parser.add_argument("--if_without_none", action="store_true", help="You do NOT need to include it in command line, as it will adjust itself in the following code; if not using none data in atomic; will use different train/val/tst data; ")
parser.add_argument("--num_sample", type=int, default=1, help="the nth time of sampling data to use")
parser.add_argument("--if_use_nshot_data", action="store_true", help="The old version of data doesn't try to maintain nshot but only keep the same number of total few-shot data; this new version of data try to maintain nshot")
parser.add_argument("--if_randomly_mask_demonstrations", action="store_true", help="if also use plain few-shot train data (without in-context demonstrations) for training CBRF")
parser.add_argument("--prob_randomly_mask_demonstrations", type=float, default=0.5, help="when if_randomly_mask_demonstrations == True, the prob to mask demonstrations")
parser.add_argument("--if_fast_train", action="store_true", help="only use 1 GPU to do the work, but can't update retriever; also uses smooth loss, so nealy exact the same as origin batch_step()")
parser.add_argument("--if_try_one_gpu", action="store_true", help="whether only use 1 GPU")
args = parser.parse_args()
# We don't allow coexistence of (if_comet_baseline and if_only_use_retrieved_target)
assert not (args.if_comet_baseline and args.if_only_use_retrieved_target)
assert not (args.if_only_use_retrieved_target and args.if_only_use_relation_and_retrieved_target)
assert not (args.if_comet_baseline and args.if_only_use_relation_and_retrieved_target)
assert args.prob_randomly_mask_demonstrations >= 0.0 and args.prob_randomly_mask_demonstrations <= 1.0
# if_randomly_mask_demonstrations and if_comet_baseline can't be true at the same time
# a small hyperparameter error preventer
assert not (args.if_randomly_mask_demonstrations and args.if_comet_baseline)
# Can't update retriever while using args.if_fast_train
if args.if_try_one_gpu:
assert args.if_fast_train
if args.if_fast_train:
assert args.if_froze_both_retriever
# Generating embeddings for analysis is not supported when args.if_fast_train == True
assert not args.if_eval_analysis
# prevent a mismatch between generator type and output_dir
if 'gpt2' in args.generator_model_type:
assert 'gpt2' in args.output_dir
elif 'bart' in args.generator_model_type:
assert 'bart' in args.output_dir
print("args.dataset_selection: ", args.dataset_selection)
if args.dataset_selection == 0:
if "t5" in args.generator_model_type:
args.max_e2 = 46
args.if_without_none = False
if args.subset_selection == -1:
args.patience = 5
if args.subset_selection == 0:
args.eval_per_steps = 30
elif args.subset_selection == 1:
args.eval_per_steps = 60
elif args.subset_selection == 2:
args.eval_per_steps = 120
elif args.subset_selection == 3:
args.eval_per_steps = 240
elif args.subset_selection == 4:
args.eval_per_steps = 500
elif args.subset_selection == 5:
args.eval_per_steps = 1000
elif args.subset_selection == 6:
args.eval_per_steps = 2000
elif args.subset_selection == -1:
args.eval_per_steps = 5000
else:
raise NotImplementedError
elif args.dataset_selection == 1:
args.max_e1 = 25
args.max_r = 15
args.max_e2 = 38
# Q: changed from 200 to 250: 8/21/2021: 11:50 p.m.
args.max_additional_cases = 250
args.num_train_epochs = 2
args.train_dataset = ["./Data/atomic/v4_atomic_trn.csv"]
args.eval_dataset = ["./Data/atomic/v4_atomic_dev.csv"]
args.test_dataset = ["./Data/atomic/v4_atomic_tst.csv"]
args.root_data_dir = "./Data/atomic/"
# if_without_none: if not using none data in atomic; will use different train/val/tst data
args.if_without_none = True
print("INFO: using atomic data without 'None' tuples")
# 8/28/2021; if using full set, patience can be smaller
if args.subset_selection == -1:
args.patience = 5
if args.subset_selection == 0:
args.eval_per_steps = 30
elif args.subset_selection == 1:
args.eval_per_steps = 60
elif args.subset_selection == 2:
args.eval_per_steps = 120
elif args.subset_selection == 3:
args.eval_per_steps = 240
elif args.subset_selection == 4:
args.eval_per_steps = 500
elif args.subset_selection == 5:
args.eval_per_steps = 1000
elif args.subset_selection == 6:
args.eval_per_steps = 2000
elif args.subset_selection == -1:
args.eval_per_steps = 5000
else:
raise NotImplementedError
elif args.dataset_selection == 2:
args.max_e1 = 130
if args.if_use_relation_for_shakes:
args.max_r = 6
else:
args.max_r = 2
args.max_e2 = 140
args.max_additional_cases = 500
args.num_train_epochs = 15
args.root_data_dir = "./Data/shakes/"
args.if_without_none = False
print("warning: need to manually set up args.eval_per_steps.")
elif args.dataset_selection == 3:
args.max_e1 = 60
args.max_r = 2
args.max_e2 = 95
args.max_additional_cases = 400