-
Notifications
You must be signed in to change notification settings - Fork 4
/
test_bi.py
32 lines (25 loc) · 982 Bytes
/
test_bi.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
import pickle

import torch
from scipy.stats import kendalltau, pearsonr
from sentence_transformers import SentenceTransformer, InputExample, losses, util, evaluation
from torch.utils.data import DataLoader
with open('pklfiles/test_dev_map.pkl', 'rb') as f:
q_map_first_doc_test=pickle.load(f)
model_name="tuned_model_bi_bert-base-uncased_e_1_b_8"
sentences1 = []
sentences2 = []
map_value_test=[]
qs=[]
for key in q_map_first_doc_test:
sentences1.append(q_map_first_doc_test[key]["qtext"])
sentences2.append(q_map_first_doc_test[key]["doc_text"])
qs.append(key)
model=SentenceTransformer("models/"+model_name)
embeddings2 = model.encode(sentences2, convert_to_tensor=True)
embeddings1 = model.encode(sentences1, convert_to_tensor=True)
cosine_scores = util.pytorch_cos_sim(embeddings1, embeddings2)
actual=[]
predicted=[]
out=open('results/QPP-bi_'+model_name,'w')
for i in range(len(sentences1)):
out.write(qs[i]+'\t'+str(float(cosine_scores[i][i]))+'\n')
out.close()