-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path transcriptPrepare.py
More file actions
84 lines (65 loc) · 3.33 KB
/
transcriptPrepare.py
File metadata and controls
84 lines (65 loc) · 3.33 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
import csv
import os
import numpy as np
import re # regular expressions
import argparse #For command line arguments
import pandas as pd # data frame functionality
import pymysql
## MAIN ##
def main():
    """Prepare crowd-sourced transcription CSVs for resolution.

    Reads the transcription CSV named by --file from --wd, keeps only
    Calbug rows, lower-cases collector names, drops specimens whose
    bnhm_id already exists in the essig MySQL database, and writes the
    remaining rows to <output>.csv (default "prep_transcript") in --wd.

    Relies on the module-global ``parser`` built in the __main__ guard.
    """
    args = parser.parse_args()

    ## Preamble ========================
    print("\n\n\n")
    print("=" * 50)
    print("PREPARING TRANSCRIPTS FOR RESOLUTION!!")

    ## Import transcription file ========================
    print("\nImporting transcription file ", args.file, " ...")
    # dtype="object" keeps IDs as strings; ISO-8859-1 matches the upstream export.
    data = pd.read_csv(os.path.join(args.wd, args.file),
                       encoding="ISO-8859-1", dtype="object")
    data = data.fillna("")

    ## Prepping transcription file ========================
    print("\nPrepping transcription file...")
    # Exclude non-Calbug entries
    print("\nExcluding non-Calbug transcriptions ...")
    data = data[data["collection"] == "Calbug"]
    # Lower-case collector names to facilitate alignments
    # (fillna("") above guarantees every value is a string).
    data["Collector"] = data["Collector"].str.lower()

    # Fetch the IDs already databased in essig.
    # FIX: the connection was previously never closed (leak) — close in finally.
    conn = pymysql.connect(host="gall.bnhm.berkeley.edu",
                           user=args.username,
                           passwd=args.password,
                           db="essig")
    try:
        essig_df = pd.read_sql('select bnhm_id from eme;', con=conn)
    finally:
        conn.close()
    essigIDs = set(essig_df["bnhm_id"])

    # Exclude specimens that are already in the database.
    ##comment## are all the nfn entries meant for calbug, this stage might basically exclude all the non-essig transcirptions
    ##comment## assumes that a specimen ID does not have multiple filenames
    print("\nExcluding specimens that have already been databased ...")
    # FIX: raw strings for regexes; compile once (used in a loop).
    id_pattern = re.compile(
        r"((EMEC|LACMENT|CASENT|UCBME|CIS|UCRCENT|SDNHM|UMMZI|SBMNHENT)[0-9]*)")
    ws_pattern = re.compile(r"\s+")
    id_dict = dict()  # maps bnhm_id -> filename (assumes one filename per ID)
    for f in set(data[args.col_id]):
        match = id_pattern.search(ws_pattern.sub("", f))
        if match is None:
            # FIX: original crashed with AttributeError on filenames lacking
            # a recognized collection prefix; warn and skip instead.
            print("WARNING: no recognizable specimen ID in filename:", f)
            continue
        id_dict[match.group()] = f
    completed_ids = set(id_dict).intersection(essigIDs)
    completed_filenames = [id_dict[i] for i in completed_ids]
    # ~ means the inverse; keep filenames that have NOT been completed.
    data = data[~data["filename"].isin(completed_filenames)]

    ## Export the new data file ========================
    outputfile = args.output if args.output else "prep_transcript"
    # FIX: the message previously claimed os.getcwd(); the file is written to args.wd.
    print("\nExporting prepared transcriptions to", args.wd)
    data.to_csv(os.path.join(args.wd, outputfile + ".csv"), index=False)
if __name__ == '__main__':
    # NOTE: `parser` is intentionally module-global — main() reads it via
    # parser.parse_args() rather than receiving it as an argument.
    parser = argparse.ArgumentParser(description="transcriptPrepare - Let's prepare some crowd-sourced transcripts!")
    # Input CSV of transcriptions, located inside the working directory (-wd).
    parser.add_argument("-file", "-f", help = "File with transcriptions")
    # Output base name; ".csv" is appended. Defaults to "prep_transcript" in main().
    parser.add_argument("-output", help = "Output file name")
    parser.add_argument("-wd", help = "Working directory")
    # NOTE(review): despite the help text, main() uses this as a SINGLE column
    # name (data[args.col_id]) holding the image filenames — not a list.
    parser.add_argument("-col_id", help = "List of columns to be checked")
    parser.add_argument("-username", help = "Username. Access to essig SQL database")
    parser.add_argument("-password", help = "Password. Access to essig SQL database")
    main()
##todo## logging the results