#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This code implements a basic, Twitter-aware tokenizer.
A tokenizer is a function that splits a string of text into words. In
Python terms, we map string and unicode objects into lists of unicode
objects.
There is not a single right way to do tokenizing. The best method
depends on the application. This tokenizer is designed to be flexible
and thus easy to adapt to new domains and tasks. The basic logic is
this:
1. The tuple regex_strings defines a list of regular expression
strings.
2. The regex_strings strings are put, in order, into a compiled
regular expression object called word_re.
3. The tokenization is done by word_re.findall(s), where s is the
user-supplied string, inside the tokenize() method of the class
Tokenizer.
4. When instantiating Tokenizer objects, there is a single option:
preserve_case. By default, it is set to False, in which case the
tokenizer downcases everything except emoticons; set it to True to
leave case untouched.
The __main__ block illustrates usage by tokenizing a few examples.
I've also included a Tokenizer method tokenize_random_tweet(). If the
twitter library is installed (http://code.google.com/p/python-twitter/)
and Twitter is cooperating, then it should tokenize a random
English-language tweet.
"""
__author__ = "original: Christopher Potts, updated: H. Andrew Schwartz"
__copyright__ = "Copyright 2011, Christopher Potts"
__credits__ = []
__license__ = "Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License: http://creativecommons.org/licenses/by-nc-sa/3.0/"
__version__ = "1.5"
__maintainer__ = "H. Andrew Schwartz, Maarten Sap"
__email__ = "See the author's website"
######################################################################
import re
import html.entities
######################################################################
# The following strings are components in the regular expression
# that is used for tokenizing. It's important that phone_number
# appears first in the final regex (since it can contain whitespace).
# It also could matter that the tags regex comes after emoticons, due to the
# possibility of having text like
#
# <:| and some text >:)
#
# Most importantly, the final element should always be last, since it
# does a last ditch whitespace-based tokenization of whatever is left.
# This particular element is used in a couple ways, so we define it
# with a name:
emoticon_string = r"""
    (?:
      [<>]?
      [:;=8>]                       # eyes
      [\-o\*\']?                    # optional nose
      [\)\]\(\[dDpPxX/\:\}\{@\|\\]  # mouth
      |
      [\)\]\(\[dDpPxX/\:\}\{@\|\\]  # mouth
      [\-o\*\']?                    # optional nose
      [:;=8<]                       # eyes
      [<>]?
      |
      <[/\\]?3                      # heart (added: has)
      |
      \(?\(?\#?                     # left cheek
      [>\-\^\*\+o\~]                # left eye
      [\_\.\|oO\,]                  # nose
      [<\-\^\*\+o\~]                # right eye
      [\#\;]?\)?\)?                 # right cheek
    )"""
# The components of the tokenizer:
regex_strings = (
    # Phone numbers:
    r"""
    (?:
      (?:            # (international)
        \+?[01]
        [\-\s.]*
      )?
      (?:            # (area code)
        [\(]?
        \d{3}
        [\-\s.\)]*
      )?
      \d{3}          # exchange
      [\-\s.]*
      \d{4}          # base
    )"""
    ,
    # Emoticons:
    emoticon_string
    ,
    # Web address:
    r"""(?:(?:http[s]?\:\/\/)?(?:[\w\_\-]+\.)+(?:com|net|gov|edu|info|org|ly|be|gl|co|gs|pr|me|cc|us|gd|nl|ws|am|im|fm|kr|to|jp|sg)(?:\/[\s\b$])?)"""
    ,
    # Bare protocol prefix (need to capture it alone sometimes):
    r"""(?:http[s]?\:\/\/)"""
    ,
    # Command in parens (need to capture it alone sometimes):
    r"""(?:\[[\w_]+\])"""
    ,
    # HTTP GET info:
    r"""(?:\/\w+\?(?:\;?\w+\=\w+)+)"""
    ,
    # HTML tags:
    r"""(?:<[^>]+=[^>]+>|<[^>]+\s\/>|<[^>\s]+>?|<?[^<\s]+>)"""
    #r"""(?:<[^>]+\w+[^>]+>|<[^>\s]+>?|<?[^<\s]+>)"""
    ,
    # Twitter username:
    r"""(?:@[\w_]+)"""
    ,
    # Twitter hashtags:
    r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)"""
    ,
    # NER tags:
    r"""(?:\[[^\]]\w+\:?\w+[^\]]\])"""
    ,
    # Remaining word types:
    r"""
    (?:[\w][\w'\-_]+[\w])          # Words with apostrophes or dashes.
    |
    (?:[+\-]?\d+[,/.:-]\d+[+\-]?)  # Numbers, including fractions, decimals.
    |
    (?:[\w_]+)                     # Words without apostrophes or dashes.
    |
    (?:\.(?:\s*\.){1,})            # Ellipsis dots.
    |
    (?:\S)                         # Everything else that isn't whitespace.
    """
)
######################################################################
# This is the core tokenizing regex:
word_re = re.compile(r"""(%s)""" % "|".join(regex_strings), re.VERBOSE | re.I | re.UNICODE)
# The emoticon string gets its own regex so that we can preserve case for them as needed:
emoticon_re = re.compile(regex_strings[1], re.VERBOSE | re.I | re.UNICODE)
# These are for regularizing HTML entities to Unicode:
html_entity_digit_re = re.compile(r"&#\d+;")
html_entity_alpha_re = re.compile(r"&\w+;")
amp = "&"
hex_re = re.compile(r'\\x[0-9a-z]{1,4}')
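
# A rough sketch of what the normalization helpers below are expected to do
# (assumed typical behavior, not a verified doctest): __html2unicode() turns
# "&#233;" and "&eacute;" into "é" and maps any remaining "&" (including one
# produced from "&amp;") to " and "; __removeHex() replaces literal escape
# artifacts such as "\x1f" (the backslash-x text, not the control character)
# with a space.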
######################################################################
class Tokenizer:
    def __init__(self, preserve_case=False, use_unicode=True):
        self.preserve_case = preserve_case
        self.use_unicode = use_unicode

    def tokenize(self, s):
        """
        Argument: s -- any string or bytes object
        Value: a tokenized list of strings; tokens are lowercased unless
        preserve_case=True (emoticons always keep their case)
        """
        # Try to ensure unicode:
        if self.use_unicode:
            # Decode bytes input; plain strings are already unicode in Python 3.
            if isinstance(s, bytes):
                s = s.decode('utf-8', errors='replace')
            else:
                s = str(s)
        # Fix HTML character entities:
        s = self.__html2unicode(s)
        s = self.__removeHex(s)
        # Tokenize:
        words = word_re.findall(s)
        # print(words)  # debug
        # Possibly alter the case, but avoid changing emoticons like :D into :d:
        if not self.preserve_case:
            words = list(map((lambda x: x if emoticon_re.search(x) else x.lower()), words))
        return words
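
    # For illustration (a sketch, not a verified doctest): with
    # preserve_case=False, an input like "LOL :D" is expected to come back from
    # tokenize() as ['lol', ':D'] -- ordinary words are downcased, while
    # emoticons keep their case because emoticon_re matches them.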
    def tokenize_random_tweet(self):
        """
        If the twitter library is installed and a twitter connection
        can be established, then tokenize a random tweet.
        """
        try:
            import twitter
        except ImportError:
            print("Apologies. The random tweet functionality requires the Python twitter library: http://code.google.com/p/python-twitter/")
            return
        api = twitter.Api()
        tweets = api.GetPublicTimeline()
        if tweets:
            for tweet in tweets:
                if tweet.user.lang == 'en':
                    return self.tokenize(tweet.text)
        else:
            raise Exception("Apologies. I couldn't get Twitter to give me a public English-language tweet. Perhaps try again.")
    def __html2unicode(self, s):
        """
        Internal method that seeks to replace all the HTML entities in
        s with their corresponding unicode characters.
        """
        # First the digits:
        ents = set(html_entity_digit_re.findall(s))
        if len(ents) > 0:
            for ent in ents:
                entnum = ent[2:-1]
                try:
                    entnum = int(entnum)
                    s = s.replace(ent, chr(entnum))
                except (ValueError, OverflowError):
                    pass
        # Now the alpha versions:
        ents = set(html_entity_alpha_re.findall(s))
        ents = list(filter((lambda x: x != amp), ents))
        for ent in ents:
            entname = ent[1:-1]
            try:
                s = s.replace(ent, chr(html.entities.name2codepoint[entname]))
            except KeyError:
                pass
        s = s.replace(amp, " and ")
        return s
    def __removeHex(self, s):
        return hex_re.sub(' ', s)
###############################################################################
if __name__ == '__main__':
    tok = Tokenizer(preserve_case=False)
    import sys
    samples = (
        "RT @ #happyfuncoding: this is a typical Twitter tweet :-)",
        "It's perhaps noteworthy that phone numbers like +1 (800) 123-4567, (800) 123-4567, and 123-4567 are treated as words despite their whitespace.",
        'Something </sarcasm> about <fails to break this up> <3 </3 <\\3 mañana vergüenza güenza création tonterías tonteréas <em class="grumpy">pain</em> <meta name="viewport" content="width=device-width"> <br />',
        "This is more like a Facebook message with a url: http://www.youtube.com/watch?v=dQw4w9WgXcQ, youtube.com google.com https://google.com/ ",
        "HTML entities & other Web oddities can be an ácute <em class='grumpy'>pain</em> >:( especially when they come from [NER:PERSON]!",
    )
    if len(sys.argv) > 1 and (sys.argv[1]):
        samples = sys.argv[1:]
    for s in samples:
        print("======================================================================")
        print(s)
        tokenized = tok.tokenize(s)
        print(tokenized)