If you have 1000 phrases, and you're searching an input string to find which of those phrases are substrings, you're probably not going to be happy with the performance you get from using a big regular expression. A trie is a bit more work to implement, but it's a lot more efficient: at each position, the regular expression a|b|c|d|e tries up to five alternatives, while a trie does a single dictionary lookup per character. You could conceivably also use a lexer generator, like Plex, that produces a DFA.
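For contrast, here's what the regex-alternation approach looks like on a small phrase set (a minimal sketch of my own; the phrase list is made up):

import re

phrases = ["cat", "car", "dog"]  # hypothetical phrase list
pattern = re.compile("|".join(re.escape(p) for p in phrases))
print(pattern.findall("my cat chased a dog into a car"))
# ['cat', 'dog', 'car']

This is fine at this scale; it's when the alternation grows to hundreds of thousands of branches that it falls over, as shown at the end of this answer.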
Edit:
I appear to be procrastinating this morning. Try this:
class Trie(object):
    def __init__(self):
        self.children = {}  # maps a character to a child Trie node
        self.item = None    # set only on nodes that end a complete item

    def add(self, item, remainder=None):
        """Add an item to the trie."""
        if remainder is None:
            remainder = item
        if remainder == "":
            self.item = item
        else:
            ch = remainder[0]
            if ch not in self.children:
                self.children[ch] = Trie()
            self.children[ch].add(item, remainder[1:])

    def find(self, word):
        """Return True if word is an item in the trie."""
        if not word:
            # Only nodes that end a complete item carry self.item,
            # so a bare prefix doesn't count as a match.
            return self.item is not None
        ch = word[0]
        if ch not in self.children:
            return False
        return self.children[ch].find(word[1:])

    def find_words(self, word, results=None):
        """Return all items in the trie that are prefixes of word."""
        if results is None:
            results = []
        if self.item is not None:
            results.append(self.item)
        if not word:
            return results
        ch = word[0]
        if ch not in self.children:
            return results
        return self.children[ch].find_words(word[1:], results)
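A tiny smoke test (my own example, not part of the original timing run) to show what add, find, and find_words do:

>>> t = Trie()
>>> t.add("car")
>>> t.add("cart")
>>> t.find("car")
True
>>> t.find("ca")
False
>>> t.find_words("cartwheel")
['car', 'cart']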
A quick test (words.txt is the BSD words file, a very handy thing to have around - it contains about 240,000 words):
>>> t = Trie()
>>> with open(r'c:\temp\words.txt', 'r') as f:
        for word in f:
            t.add(word.strip())
That takes about 15 seconds on my machine. This, however, is almost instantaneous:
>>> s = "I played video games in a drunken haze."
>>> r = []
>>> for i in range(len(s)):
        r.extend(t.find_words(s[i:]))
>>> r
['I', 'p', 'play', 'l', 'la', 'lay', 'a', 'ay', 'aye', 'y', 'ye', 'yed', 'e', 'd', 'v', 'video', 'i', 'id', 'ide', 'd', 'de', 'e', 'o', 'g', 'ga', 'gam', 'game', 'a', 'am', 'ame', 'm', 'me', 'e', 'es', 's', 'i', 'in', 'n', 'a', 'd', 'drunk', 'drunken', 'r', 'run', 'u', 'un', 'unken', 'n', 'k', 'ken', 'e', 'en', 'n', 'h', 'ha', 'haze', 'a', 'z', 'e']
Yes, unken is in words.txt. I have no idea why.
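If you want that scan as a reusable function, here's a small wrapper over the loop above (my own naming, not from the original answer):

def find_all_substrings(trie, s):
    """Return every trie item that occurs as a substring of s."""
    results = []
    for i in range(len(s)):
        results.extend(trie.find_words(s[i:]))
    return results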
Oh, and I did try to compare with regular expressions:
>>> import re
>>> with open(r'c:\temp\words.txt', 'r') as f:
        p = "|".join([l.strip() for l in f])
>>> p = re.compile(p)
Traceback (most recent call last):
File "<pyshell#250>", line 1, in <module>
p = re.compile(p)
File "C:\Python26\lib\re.py", line 188, in compile
return _compile(pattern, flags)
File "C:\Python26\lib\re.py", line 241, in _compile
p = sre_compile.compile(pattern, flags)
File "C:\Python26\lib\sre_compile.py", line 529, in compile
groupindex, indexgroup
OverflowError: regular expression code size limit exceeded
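If you really want a regex baseline anyway, one workaround (a sketch I haven't run against the same words file) is to split the alternation into several smaller compiled patterns:

import re

def compile_chunked(words, chunk_size=1000):
    """Compile one alternation pattern per chunk of words."""
    return [re.compile("|".join(re.escape(w) for w in words[i:i + chunk_size]))
            for i in range(0, len(words), chunk_size)]

Even then, each position in the input may still be tried against every alternative, so the trie wins regardless.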