#!/usr/bin/python
#
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

""" Lexer for PPAPI IDL """

#
# IDL Lexer
#
# The lexer uses the PLY lex library to build a tokenizer which understands
# WebIDL tokens.
#
# WebIDL, and WebIDL regular expressions, can be found at:
#   http://dev.w3.org/2006/webapi/WebIDL/
# PLY can be found at:
#   http://www.dabeaz.com/ply/
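#
# Example (illustrative): feeding a small snippet to the TextToTokens helper
# defined below yields the raw token values in source order:
#
#   TextToTokens('interface Foo { };')  =>  ['interface', 'Foo', '{', '}', ';']
#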

import os.path
import re
import sys

#
# Try to load the ply module; if that fails, assume it lives in the
# third_party directory, relative to ppapi.
#
try:
  from ply import lex
except ImportError:
  module_path, module_name = os.path.split(__file__)
  third_party = os.path.join(module_path, '..', '..', 'third_party')
  sys.path.append(third_party)
  from ply import lex

from idl_option import GetOption, Option, ParseOptions


Option('output', 'Generate output.')

#
# IDL Lexer
#
class IDLLexer(object):
  # 'tokens' is a value required by lex which specifies the complete list
  # of valid token types.
  tokens = [
    # Symbol and keyword types
    'ATTRIBUTE',
    'COMMENT',
    'DESCRIBE',
    'ENUM',
    'LABEL',
    'SYMBOL',
    'INLINE',
    'INTERFACE',
    'READONLY',
    'STRUCT',
    'TYPEDEF',

    # Data types
    'FLOAT',
    'OCT',
    'INT',
    'HEX',
    'STRING',

    # Operators
    'LSHIFT'
  ]

  # 'keywords' is a map of string to token type.  All SYMBOL tokens are
  # matched against keywords, to determine if the token is actually a keyword.
  keywords = {
    'attribute' : 'ATTRIBUTE',
    'describe' : 'DESCRIBE',
    'enum' : 'ENUM',
    'label' : 'LABEL',
    'interface' : 'INTERFACE',
    'readonly' : 'READONLY',
    'struct' : 'STRUCT',
    'typedef' : 'TYPEDEF',
  }

  # 'literals' is a value expected by lex which specifies a list of valid
  # literal tokens, meaning the token type and token value are identical.
  literals = '"*.(){}[],;:=+-'

  # Token definitions
  #
  # Lex assumes any value or function in the form of 't_<TYPE>' represents a
  # regular expression where a match will emit a token of type <TYPE>.  In the
  # case of a function, the function is called when a match is made.  These
  # definitions come from WebIDL.
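  #
  # For example, the string rule t_LSHIFT below matches '<<' and emits a token
  # of type LSHIFT, while a function rule such as t_STRING both matches its
  # pattern and post-processes the token before returning it.
  #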

  # 't_ignore' is a special match of items to ignore
  t_ignore = ' \t'

  # Constant values
  t_FLOAT = r'-?(\d+\.\d*|\d*\.\d+)([Ee][+-]?\d+)?|-?\d+[Ee][+-]?\d+'
  t_INT = r'-?[0-9]+'
  t_OCT = r'-?0[0-7]+'
  t_HEX = r'-?0[Xx][0-9A-Fa-f]+'
  t_LSHIFT = r'<<'
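  # Illustrative matches for the constant patterns above:
  #   t_FLOAT  : '1.5', '-.5', '2e10', '1.0E-3'
  #   t_INT    : '123', '-42'
  #   t_OCT    : '0755', '-017'
  #   t_HEX    : '0x1F', '-0XffE0'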

  # A line ending '\n'; we use this to increment the line number.
  def t_LINE_END(self, t):
    r'\n+'
    self.AddLines(len(t.value))

  # We do not process escapes in the IDL strings.  Strings are exclusively
  # used for attributes, and not used as typical 'C' constants.
  def t_STRING(self, t):
    r'"[^"]*"'
    t.value = t.value[1:-1]
    self.AddLines(t.value.count('\n'))
    return t
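  # For example, '"point_size"' is returned as a STRING token whose value is
  # point_size: the surrounding quotes are stripped and nothing is unescaped.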

  # A C or C++ style comment:  /* xxx */ or //
  def t_COMMENT(self, t):
    r'(/\*(.|\n)*?\*/)|(//.*)'
    self.AddLines(t.value.count('\n'))
    return t

  # Return a "preprocessor" inline block.  The match is non-greedy so that
  # multiple #inline ... #endinl blocks in one file stay separate.
  def t_INLINE(self, t):
    r'\#inline (.|\n)*?\#endinl.*'
    self.AddLines(t.value.count('\n'))
    return t

  # A symbol or keyword.
  def t_KEYWORD_SYMBOL(self, t):
    r'[A-Za-z][A-Za-z_0-9]*'

    # All non-keywords are assumed to be symbols
    t.type = self.keywords.get(t.value, 'SYMBOL')
    return t
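  # For example, 'struct' comes back as a STRUCT token while 'MyStruct' falls
  # through to SYMBOL; both keep the matched text as their value.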

  def t_ANY_error(self, t):
    msg = "Unrecognized input"
    line = self.lexobj.lineno

    # If that line has not been accounted for, then we must have hit
    # EoF, so compute the beginning of the line that caused the problem.
    if line >= len(self.index):
      # Find the offset in the line of the first word causing the issue
      word = t.value.split()[0]
      offs = self.lines[line - 1].find(word)
      # Add the computed line's starting position
      self.index.append(self.lexobj.lexpos - offs)
      msg = "Unexpected EoF reached after"

    pos = self.lexobj.lexpos - self.index[line]
    file = self.lexobj.filename
    out = self.ErrorMessage(file, line, pos, msg)
    sys.stderr.write(out + '\n')

  def AddLines(self, count):
    # Set the lexer position for the beginning of the next line.  In the case
    # of multiple lines, tokens can not exist on any of the lines except the
    # last one, so the recorded values for previous lines are unused.  We still
    # fill the array, however, to make sure the line count is correct.
    self.lexobj.lineno += count
    for i in range(count):
      self.index.append(self.lexobj.lexpos)
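  # Illustrative state: after lexing 'A\nBB\nC' the index array is [0, 2, 5],
  # i.e. the character offsets at which lines 2 and 3 begin.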

  def FileLineMsg(self, file, line, msg):
    if file: return "%s(%d) : %s" % (file, line + 1, msg)
    return "<BuiltIn> : %s" % msg

  def SourceLine(self, file, line, pos):
    caret = '\t^'.expandtabs(pos)
    # We decrement the line number since the array is 0 based while the
    # line numbers are 1 based.
    return "%s\n%s" % (self.lines[line - 1], caret)
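  # For example, with self.lines[line - 1] == 'struct Foo;' and pos == 7 this
  # returns the source line followed by a caret under the offending column:
  #   struct Foo;
  #          ^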

  def ErrorMessage(self, file, line, pos, msg):
    return "\n%s\n%s" % (
        self.FileLineMsg(file, line, msg),
        self.SourceLine(file, line, pos))

  def SetData(self, filename, data):
    # Start with line 1, not zero
    self.lexobj.lineno = 1
    self.lexobj.filename = filename
    self.lines = data.split('\n')
    self.index = [0]
    self.lexobj.input(data)

  def __init__(self):
    self.lexobj = lex.lex(object=self, lextab=None, optimize=0)



#
# FilesToTokens
#
# From a set of source file names, generate a list of tokens.
#
def FilesToTokens(filenames, verbose=False):
  lexer = IDLLexer()
  outlist = []
  for filename in filenames:
    data = open(filename).read()
    lexer.SetData(filename, data)
    if verbose: sys.stdout.write('  Loaded %s...\n' % filename)
    while 1:
      t = lexer.lexobj.token()
      if t is None: break
      outlist.append(t)
  return outlist


#
# TokensFromText
#
# From a block of text, generate a list of token values.
#
def TokensFromText(text):
  lexer = IDLLexer()
  lexer.SetData('unknown', text)
  outlist = []
  while 1:
    t = lexer.lexobj.token()
    if t is None: break
    outlist.append(t.value)
  return outlist

#
# TextToTokens
#
# From a block of text, generate a list of tokens
#
def TextToTokens(source):
  lexer = IDLLexer()
  outlist = []
  lexer.SetData('AUTO', source)
  while 1:
    t = lexer.lexobj.token()
    if t is None: break
    outlist.append(t.value)
  return outlist


#
# TestSame
#
# From a set of token values, generate a new source text by joining with a
#   newline.  The new source is then tokenized and compared against the
#   old set.
#
def TestSame(values1):
  # Recreate the source from the tokens.  We use newline instead of whitespace
  # since the '//' and #inline regex are line sensitive.
  src1 = '\n'.join(values1)
  values2 = TextToTokens(src1)
  src2 = '\n'.join(values2)

  count1 = len(values1)
  count2 = len(values2)
  if count1 != count2:
    print "Size mismatch original %d vs %d\n" % (count1, count2)
    if count1 > count2: count1 = count2

  for i in range(count1):
    if values1[i] != values2[i]:
      print "%d >>%s<< >>%s<<" % (i, values1[i], values2[i])

  if GetOption('output'):
    sys.stdout.write('Generating original.txt and tokenized.txt\n')
    open('original.txt', 'w').write(src1)
    open('tokenized.txt', 'w').write(src2)

  if values1 == values2:
    sys.stdout.write('Same: Pass\n')
    return 0

  print "****************\n%s\n%s***************\n" % (src1, src2)
  sys.stdout.write('Same: Failed\n')
  return -1


#
# TestExpect
#
# From a set of token pairs, verify that the type field of the second token
#   matches the value of the first, so that:
#   INT 123 FLOAT 1.1
# will generate a passing test, where the first token is the SYMBOL INT,
#   the second token is the INT 123, the third is the SYMBOL FLOAT and
#   the fourth is the FLOAT 1.1, etc...
def TestExpect(tokens):
  count = len(tokens)
  index = 0
  errors = 0
  while index < count:
    type = tokens[index].value
    token = tokens[index + 1]
    index += 2

    if type != token.type:
      sys.stderr.write('Mismatch: Expected %s, but got %s = %s.\n' %
                       (type, token.type, token.value))
      errors += 1

  if not errors:
    sys.stdout.write('Expect: Pass\n')
    return 0

  sys.stdout.write('Expect: Failed\n')
  return -1



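# Example invocation (assumption: idl_option surfaces the 'test', 'verbose'
# and 'output' settings as --test, --verbose and --output flags, this module
# is saved as idl_lexer.py, and 'some_api.idl' is a placeholder input file):
#
#   python idl_lexer.py --test --verbose some_api.idl
#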
def Main(args):
  filenames = ParseOptions(args)

  try:
    tokens = FilesToTokens(filenames, GetOption('verbose'))
    values = [tok.value for tok in tokens]
    if GetOption('output'): sys.stdout.write(' <> '.join(values) + '\n')
    if GetOption('test'):
      if TestSame(values):
        return -1
      if TestExpect(tokens):
        return -1
    return 0

  except lex.LexError as le:
    sys.stderr.write('%s\n' % str(le))
    return -1


if __name__ == '__main__':
  sys.exit(Main(sys.argv[1:]))