run.py (forked from adsabs/ADSManualParser)
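"""Command-line driver that turns publisher metadata into ADS classic tagged records.

Input is either a path of metadata files to parse (JATS, Crossref, DataCite,
Dublin Core, Elsevier, ADS Feedback, Copernicus, or Wiley) or one or more DOIs
to harvest from Crossref. Example invocations (the path and DOI shown are
illustrative, not real data):

    python run.py --doi 10.1234/example.doi --outfile ./doi.tag
    python run.py --proc_path './data/**/*.xml' --file_type jats --age 7 --write_refs
"""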
import argparse
import json
import os
import re
from adsenrich.references import ReferenceWriter
from adsingestp.parsers.crossref import CrossrefParser
from adsingestp.parsers.jats import JATSParser
from adsingestp.parsers.datacite import DataciteParser
from adsingestp.parsers.dubcore import DublinCoreParser
from adsingestp.parsers.elsevier import ElsevierParser
from adsingestp.parsers.adsfeedback import ADSFeedbackParser
from adsingestp.parsers.copernicus import CopernicusParser
from adsingestp.parsers.wiley import WileyParser
from adsmanparse import translator, doiharvest, classic_serializer, utils
from adsputils import load_config, setup_logging
from datetime import datetime, timedelta
from glob import iglob
PARSER_TYPES = {'jats': JATSParser(),
                'dc': DataciteParser(),
                'cr': CrossrefParser(),
                'nlm': JATSParser(),
                'elsevier': ElsevierParser(),
                'feedback': ADSFeedbackParser(),
                'copernicus': CopernicusParser(),
                'wiley': WileyParser(),
                'dubcore': DublinCoreParser(),
                }

proj_home = os.path.realpath(os.path.join(os.path.dirname(__file__), "./"))
conf = load_config(proj_home=proj_home)
logger = setup_logging(
    "run.py",
    proj_home=proj_home,
    level=conf.get("LOGGING_LEVEL", "INFO"),
    attach_stdout=conf.get("LOG_STDOUT", False),
)

def get_args():
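    """Parse and return the command-line arguments for this script."""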
    parser = argparse.ArgumentParser('Create an ADS record from a DOI')

    parser.add_argument('-b',
                        '--bibstem',
                        dest='bibstem',
                        action='store',
                        default=None,
                        help='Bibstem for special handling and/or bibcode(s)')

    parser.add_argument('-v',
                        '--volume',
                        dest='volume',
                        action='store',
                        default=None,
                        help='Volume for special handling and/or bibcode(s)')

    parser.add_argument('-d',
                        '--doi',
                        dest='fetch_doi',
                        action='store',
                        default=None,
                        help='DOI to fetch')

    parser.add_argument('-l',
                        '--doi-list',
                        dest='fetch_doi_list',
                        action='store',
                        default=None,
                        help='Path to a file containing a list of DOIs, one per line')

    parser.add_argument('-f',
                        '--outfile',
                        dest='output_file',
                        action='store',
                        default='./doi.tag',
                        help='File that tagged format will be written to')

    parser.add_argument('-p',
                        '--proc_path',
                        dest='proc_path',
                        action='store',
                        default=None,
                        help='Path to files or list of files')

    parser.add_argument('-a',
                        '--age',
                        dest='proc_since',
                        action='store',
                        default=None,
                        help='Age (in days) of oldest files in --proc_path to process')

    parser.add_argument('-t',
                        '--file_type',
                        dest='file_type',
                        action='store',
                        default=None,
                        help='Type of input file: jats, dc, cr, nlm, elsevier, feedback, copernicus, wiley, dubcore')

    parser.add_argument('-w',
                        '--write_refs',
                        dest='write_refs',
                        action='store_true',
                        default=False,
                        help='Export references from records along with bibdata')

    parser.add_argument('-r',
                        '--ref_dir',
                        dest='ref_dir',
                        action='store',
                        default='/proj/ads/references/sources/',
                        help='Base path to reference output directory')

    parser.add_argument('-s',
                        '--source',
                        dest='source',
                        action='store',
                        default=None,
                        help='Origin/publisher of record/reference data')

    parser.add_argument('-z',
                        '--parsedfile',
                        dest='parsedfile',
                        action='store_true',
                        default=False,
                        help='Output parsed filename in properties tag')

    parser.add_argument('-x',
                        '--write_xref',
                        dest='write_xref',
                        action='store_true',
                        default=False,
                        help='Write doi-harvested records to xml file')

    args = parser.parse_args()
    return args

def create_tagged(rec=None, args=None):
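    """Translate a parsed record and serialize it to ADS classic tagged
    format; returns the tagged string, or None if the export fails."""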
    try:
        xlator = translator.Translator()
        seri = classic_serializer.ClassicSerializer()
        xlator.translate(data=rec, bibstem=args.bibstem, volume=args.volume, parsedfile=args.parsedfile)
        output = seri.output(xlator.output)
        return output
    except Exception as err:
        logger.warning("Export to tagged file failed: %s\t%s" % (err, rec))

def write_xml(inputRecord):
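    """Write the raw XML of a Crossref ('cr') record to a file under
    XML_OUTPUT_BASEDIR, at a path derived from its DOI."""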
    # Hoist the DOI lookup so the except block can always reference it.
    doi = inputRecord.get("name", None)
    try:
        if inputRecord.get("type", None) == "cr":
            raw_data = inputRecord.get("data", "")
            output_dir = conf.get("XML_OUTPUT_BASEDIR", "./doi/")
            doi_to_path = doi.split("/")
            path = []
            # Strip the scheme and host if the DOI was given as a full URL.
            if "http" in doi_to_path[0].lower():
                doi_to_path = doi_to_path[3:]
            for d in doi_to_path:
                path.append(re.sub(r"[^\w_. -]+", "_", d))
            filepath = "/".join(path) + '.xml'
            output_path = output_dir + filepath
            dirname = os.path.dirname(output_path)
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            with open(output_path, "w") as fx:
                fx.write("%s\n" % raw_data)
        else:
            raise Exception("This is not a crossref file.")
    except Exception as err:
        logger.warning("Export of doi (%s) to xml failed: %s" % (doi, err))

def create_refs(rec=None, args=None):
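    """Export the record's references to the reference directory using
    adsenrich's ReferenceWriter."""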
    try:
        rw = ReferenceWriter(reference_directory=args.ref_dir,
                             reference_source=args.source,
                             data=rec)
        rw.write_references_to_file()
    except Exception as err:
        logger.warning("Unable to write references: %s" % err)

def write_record(record, args):
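    """Append the tagged form of a parsed record to the output file and,
    if requested, export its references."""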
    if args.output_file:
        tagged = create_tagged(rec=record, args=args)
        if tagged:
            with open(args.output_file, "a") as fout:
                fout.write("%s\n" % tagged)
            if args.write_refs:
                create_refs(rec=record, args=args)
        else:
            raise Exception("Tagged record not generated.")
    else:
        raise Exception("Output_file not defined, no place to write records to!")

def parse_record(rec):
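    """Parse one raw input record with the parser registered for its type;
    returns the parsed record, or None on failure or title suppression."""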
    pdata = rec.get('data', None)
    ptype = rec.get('type', None)
    filename = rec.get('name', None)
    parser = PARSER_TYPES.get(ptype, None)
    write_file = utils.has_body(pdata)
    parsedrecord = None
    if not parser:
        logger.error("No parser available for file_type '%s'." % ptype)
    else:
        try:
            # Re-initialize the shared parser instance to clear any state
            # left over from the previous record.
            parser.__init__()
            if ptype == 'nlm':
                parsedrecord = parser.parse(pdata, bsparser='lxml-xml')
            else:
                parsedrecord = parser.parse(pdata)
            if parsedrecord:
                if utils.suppress_title(parsedrecord, conf.get("DEPRECATED_TITLES", [])):
                    parsedrecord = None
                    raise Exception("Warning: article matches a suppressed title.")
                if filename:
                    if not parsedrecord.get("recordData", {}).get("loadLocation", None):
                        parsedrecord["recordData"]["loadLocation"] = filename
                    if not write_file:
                        parsedrecord["recordData"]["loadLocation"] = None
            else:
                raise Exception("Null body returned by parser!")
        except Exception as err:
            logger.warning("Error parsing record (%s): %s" % (filename, err))
    return parsedrecord

def process_record(rec, args):
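    """Parse a raw input record and write out the result, logging any
    failures along the way."""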
    try:
        parsedRecord = parse_record(rec)
        if not parsedRecord:
            logger.error("Parsing yielded no data for %s" % rec.get("name", None))
        else:
            try:
                write_record(parsedRecord, args)
            except Exception as err:
                logger.error("Classic tagger did not generate a tagged record for %s: %s" % (rec.get("name", None), err))
            else:
                logger.debug("Successfully processed %s with %s" % (rec.get("name", None), str(args)))
    except Exception as err:
        logger.error("Error parsing and processing record %s: %s" % (rec.get("name", None), err))

def process_filepath(args):
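    """Glob args.proc_path for input files, optionally keeping only files
    modified within the last args.proc_since days, and process each one."""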
    if args.proc_path:
        logger.info("Finding files in path %s ..." % args.proc_path)
        infiles = [x for x in iglob(args.proc_path, recursive=True)]
        if not infiles:
            logger.warning("No files found in path %s." % args.proc_path)
        else:
            logger.info("Found %s files." % len(infiles))
            if args.proc_since:
                logger.info("Checking file ages...")
                dtime = timedelta(days=int(args.proc_since))
                today = datetime.today()
                infiles_since = [x for x in infiles if ((today - datetime.fromtimestamp(os.path.getmtime(x))) <= dtime)]
                infiles = infiles_since
            if not infiles:
                logger.error("No files more recent than %s days old!" % str(args.proc_since))
            else:
                nfiles = len(infiles)
                logger.info("There were %s files found to process" % str(nfiles))
                for f in infiles:
                    inputRecord = {}
                    try:
                        with open(f, 'r') as fin:
                            inputRecord = {'data': fin.read(),
                                           'name': f,
                                           'type': args.file_type}
                    except Exception as err:
                        logger.warning("Failed to read input file %s: %s" % (f, err))
                    else:
                        process_record(inputRecord, args)
    else:
        logger.warning("Null processing path given, nothing processed.")

def process_doilist(doilist, args):
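    """Harvest each DOI in doilist from Crossref and process the resulting
    records, optionally writing the raw XML to disk first."""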
    if doilist:
        ptype = args.file_type
        if not ptype:
            ptype = 'cr'
        for d in doilist:
            try:
                getdoi = doiharvest.DoiHarvester(doi=d)
                doi_record = getdoi.get_record()
                inputRecord = {'data': doi_record,
                               'name': d,
                               'type': ptype}
                if args.write_xref:
                    write_xml(inputRecord)
            except Exception as err:
                logger.warning("Failed to fetch doi %s: %s" % (d, err))
            else:
                process_record(inputRecord, args)
    else:
        logger.warning("No DOIs provided, nothing processed.")

def main():
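    """Entry point: dispatch either to file-path processing or to DOI
    harvesting, depending on the arguments supplied."""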
    args = get_args()
    rawDataList = []
    ingestDocList = []
    logger.debug("Initiating parsing with the following arguments: %s" % str(args))
    if args.proc_path and not args.file_type:
        fileTypeList = list(PARSER_TYPES.keys())
        logger.error("You need to provide a filetype from this list: %s" % str(fileTypeList))
    else:
        # This route processes data from user-input files
        if args.proc_path:
            process_filepath(args)
        # This route fetches data from Crossref via the Habanero module
        elif (args.fetch_doi or args.fetch_doi_list):
            doiList = None
            if args.fetch_doi:
                doiList = [args.fetch_doi]
            elif args.fetch_doi_list:
                doiList = []
                with open(args.fetch_doi_list, 'r') as fin:
                    for line in fin.readlines():
                        doiList.append(line.strip())
            process_doilist(doiList, args)

if __name__ == '__main__':
    main()