summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJinWang An <jinwang.an@samsung.com>2021-01-05 16:03:13 +0900
committerJinWang An <jinwang.an@samsung.com>2021-01-05 16:03:13 +0900
commit97c028066d4ffdb2a9d653caa8701ad318e61437 (patch)
treeaa9d857b52fa1229746baee7fdf6474c72346002
parentcaf0b7d7cc794781eb71e95fdf3715c1c7dabc28 (diff)
downloadpython-pygments-97c028066d4ffdb2a9d653caa8701ad318e61437.tar.gz
python-pygments-97c028066d4ffdb2a9d653caa8701ad318e61437.tar.bz2
python-pygments-97c028066d4ffdb2a9d653caa8701ad318e61437.zip
Imported Upstream version 1.3upstream/1.3
-rw-r--r--AUTHORS11
-rw-r--r--CHANGES44
-rw-r--r--MANIFEST.in2
-rw-r--r--PKG-INFO2
-rw-r--r--Pygments.egg-info/PKG-INFO2
-rw-r--r--Pygments.egg-info/SOURCES.txt27
-rw-r--r--Pygments.egg-info/entry_points.txt3
-rw-r--r--docs/build/api.html2
-rw-r--r--docs/build/authors.html13
-rw-r--r--docs/build/changelog.html39
-rw-r--r--docs/build/cmdline.html2
-rw-r--r--docs/build/filterdevelopment.html2
-rw-r--r--docs/build/filters.html2
-rw-r--r--docs/build/formatterdevelopment.html2
-rw-r--r--docs/build/formatters.html2
-rw-r--r--docs/build/index.html2
-rw-r--r--docs/build/installation.html2
-rw-r--r--docs/build/integrate.html9
-rw-r--r--docs/build/lexerdevelopment.html2
-rw-r--r--docs/build/lexers.html206
-rw-r--r--docs/build/moinmoin.html2
-rw-r--r--docs/build/plugins.html2
-rw-r--r--docs/build/quickstart.html2
-rw-r--r--docs/build/rstdirective.html2
-rw-r--r--docs/build/styles.html2
-rw-r--r--docs/build/tokens.html2
-rw-r--r--docs/build/unicode.html2
-rw-r--r--docs/src/integrate.txt6
-rw-r--r--external/pygments.bashcomp38
-rwxr-xr-xpygmentize7
-rw-r--r--pygments/__init__.py4
-rw-r--r--pygments/filters/__init__.py9
-rw-r--r--pygments/formatters/__init__.py2
-rw-r--r--pygments/formatters/html.py9
-rw-r--r--pygments/lexer.py22
-rw-r--r--pygments/lexers/__init__.py9
-rw-r--r--pygments/lexers/_mapping.py17
-rw-r--r--pygments/lexers/agile.py22
-rw-r--r--pygments/lexers/asm.py4
-rw-r--r--pygments/lexers/compiled.py601
-rw-r--r--pygments/lexers/functional.py6
-rw-r--r--pygments/lexers/math.py52
-rw-r--r--pygments/lexers/other.py33
-rw-r--r--pygments/lexers/parsers.py10
-rw-r--r--pygments/lexers/templates.py103
-rw-r--r--pygments/lexers/text.py37
-rw-r--r--pygments/lexers/web.py920
-rw-r--r--pygments/token.py5
-rw-r--r--pygments/util.py4
-rwxr-xr-x[-rw-r--r--]scripts/find_error.py148
-rwxr-xr-xsetup.py17
-rwxr-xr-xtests/examplefiles/CPDictionary.j611
-rw-r--r--tests/examplefiles/OrderedMap.hx584
-rw-r--r--tests/examplefiles/Sorting.mod470
-rw-r--r--tests/examplefiles/aspx-cs_example (renamed from tests/examplefiles/example.aspx)0
-rw-r--r--tests/examplefiles/demo.cfm38
-rw-r--r--tests/examplefiles/r-console-transcript.Rout38
-rw-r--r--tests/examplefiles/test.adb211
-rw-r--r--tests/examplefiles/test.flx57
-rw-r--r--tests/examplefiles/test.mod374
-rw-r--r--tests/examplefiles/test.php9
-rw-r--r--tests/examplefiles/underscore.coffee603
-rw-r--r--tests/examplefiles/xml_example (renamed from tests/examplefiles/example.xml)0
-rw-r--r--tests/support.pycbin583 -> 0 bytes
-rw-r--r--tests/test_basic_api.py337
-rw-r--r--tests/test_basic_api.pycbin9769 -> 0 bytes
-rw-r--r--tests/test_clexer.pycbin1473 -> 0 bytes
-rw-r--r--tests/test_cmdline.pycbin4506 -> 0 bytes
-rw-r--r--tests/test_examplefiles.py2
-rw-r--r--tests/test_examplefiles.pycbin2237 -> 0 bytes
-rw-r--r--tests/test_html_formatter.py6
-rw-r--r--tests/test_html_formatter.pycbin4693 -> 0 bytes
-rw-r--r--tests/test_latex_formatter.pycbin1826 -> 0 bytes
-rw-r--r--tests/test_regexlexer.pycbin1645 -> 0 bytes
-rw-r--r--tests/test_token.pycbin2041 -> 0 bytes
-rw-r--r--tests/test_using_api.pycbin1966 -> 0 bytes
-rw-r--r--tests/test_util.pycbin4100 -> 0 bytes
77 files changed, 5451 insertions, 364 deletions
diff --git a/AUTHORS b/AUTHORS
index 34cc6be..d102686 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -8,9 +8,11 @@ Other contributors, listed alphabetically, are:
* Kumar Appaiah -- Debian control lexer
* Ali Afshar -- image formatter
* Andreas Amann -- AppleScript lexer
+* Jeremy Ashkenas -- CoffeeScript lexer
* Stefan Matthias Aust -- Smalltalk lexer
* Ben Bangert -- Mako lexers
* Max Battcher -- Darcs patch lexer
+* Paul Baumgart, 280 North, Inc. -- Objective-J lexer
* Michael Bayer -- Myghty lexers
* Jarrett Billingsley -- MiniD lexer
* Adam Blinkinsop -- Haskell, Redcode lexers
@@ -18,17 +20,21 @@ Other contributors, listed alphabetically, are:
* Pierre Bourdon -- bugfixes
* Christopher Creutzig -- MuPAD lexer
* Pete Curry -- bugfixes
+* Owen Durni -- haXe lexer
* Nick Efford -- Python 3 lexer
* Artem Egorkine -- terminal256 formatter
* Laurent Gautier -- R/S lexer
* Krzysiek Goj -- Scala lexer
* Matt Good -- Genshi, Cheetah lexers
+* Patrick Gotthardt -- PHP namespaces support
* Olivier Guibe -- Asymptote lexer
* Matthew Harrison -- SVG formatter
* Steven Hazel -- Tcl lexer
* Aslak Hellesøy -- Gherkin lexer
+* David Hess, Fish Software, Inc. -- Objective-J lexer
* Varun Hiremath -- Debian control lexer
* Dennis Kaarsemaker -- sources.list lexer
+* Benjamin Kowarsch -- Modula-2 lexer
* Marek Kubica -- Scheme lexer
* Jochen Kupperschmidt -- Markdown processor
* Gerd Kurzbach -- Modelica lexer
@@ -37,7 +43,7 @@ Other contributors, listed alphabetically, are:
* Kirk McDonald -- D lexer
* Lukas Meuser -- BBCode formatter, Lua lexer
* Paulo Moura -- Logtalk lexer
-* Ana Nelson -- Ragel, ANTLR lexers
+* Ana Nelson -- Ragel, ANTLR, R console lexers
* Nam T. Nguyen -- Monokai style
* Jesper Noehr -- HTML formatter "anchorlinenos"
* Jonas Obrist -- BBCode lexer
@@ -50,13 +56,16 @@ Other contributors, listed alphabetically, are:
* Mario Ruggier -- Evoque lexers
* Stou Sandalski -- NumPy, FORTRAN, tcsh and XSLT lexers
* Matteo Sasso -- Common Lisp lexer
+* Joe Schafer -- Ada lexer
* Ken Schutte -- Matlab lexers
* Tassilo Schweyer -- Io, MOOCode lexers
* Joerg Sieker -- ABAP lexer
* Kirill Simonov -- YAML lexer
* Tiberius Teng -- default style overhaul
* Jeremy Thurgood -- Erlang, Squid config lexers
+* Erick Tryzelaar -- Felix lexer
* Whitney Young -- ObjectiveC lexer
+* Nathan Weizenbaum -- Haml and Sass lexers
* Dietmar Winkler -- Modelica lexer
* Nils Winter -- Smalltalk lexer
* Davy Wybiral -- Clojure lexer
diff --git a/CHANGES b/CHANGES
index b81efa0..59d8860 100644
--- a/CHANGES
+++ b/CHANGES
@@ -3,6 +3,45 @@ Pygments changelog
Issue numbers refer to the tracker at http://dev.pocoo.org/projects/pygments/.
+Version 1.3
+-----------
+(codename Schneeglöckchen, released Mar 01, 2010)
+
+- Added the ``ensurenl`` lexer option, which can be used to suppress the
+ automatic addition of a newline to the lexer input.
+
+- Lexers added:
+
+ * Ada
+ * Coldfusion
+ * Modula-2
+ * haXe
+ * R console
+ * Objective-J
+ * Haml and Sass
+ * CoffeeScript
+
+- Enhanced reStructuredText highlighting.
+
+- Added support for PHP 5.3 namespaces in the PHP lexer.
+
+- Added a bash completion script for `pygmentize`, to the external/
+ directory (#466).
+
+- Fixed a bug in `do_insertions()` used for multi-lexer languages.
+
+- Fixed a Ruby regex highlighting bug (#476).
+
+- Fixed regex highlighting bugs in Perl lexer (#258).
+
+- Add small enhancements to the C lexer (#467) and Bash lexer (#469).
+
+- Small fixes for the Tcl, Debian control file, Nginx config,
+ Smalltalk, Objective-C, Clojure, Lua lexers.
+
+- Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords.
+
+
Version 1.2.2
-------------
(bugfix release, released Jan 02, 2010)
@@ -33,7 +72,10 @@ Version 1.2
* Go
* Gherkin (Cucumber)
* CMake
- * OOC
+ * Ooc
+ * Coldfusion
+ * haXe
+ * R console
- Added options for rendering LaTeX in source code comments in the
LaTeX formatter (#461).
diff --git a/MANIFEST.in b/MANIFEST.in
index 2a39ef4..8fa0dfe 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,4 @@
-include external/*.py
+include external/*
include Makefile CHANGES LICENSE AUTHORS TODO ez_setup.py
recursive-include tests *
recursive-include docs *
diff --git a/PKG-INFO b/PKG-INFO
index 98c415d..1c8da05 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: Pygments
-Version: 1.2.2
+Version: 1.3
Summary: Pygments is a syntax highlighting package written in Python.
Home-page: http://pygments.org/
Author: Georg Brandl
diff --git a/Pygments.egg-info/PKG-INFO b/Pygments.egg-info/PKG-INFO
index 98c415d..1c8da05 100644
--- a/Pygments.egg-info/PKG-INFO
+++ b/Pygments.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.0
Name: Pygments
-Version: 1.2.2
+Version: 1.3
Summary: Pygments is a syntax highlighting package written in Python.
Home-page: http://pygments.org/
Author: Georg Brandl
diff --git a/Pygments.egg-info/SOURCES.txt b/Pygments.egg-info/SOURCES.txt
index a885aaa..1e12c02 100644
--- a/Pygments.egg-info/SOURCES.txt
+++ b/Pygments.egg-info/SOURCES.txt
@@ -5,12 +5,12 @@ MANIFEST.in
Makefile
TODO
ez_setup.py
-pygmentize
setup.cfg
setup.py
Pygments.egg-info/PKG-INFO
Pygments.egg-info/SOURCES.txt
Pygments.egg-info/dependency_links.txt
+Pygments.egg-info/entry_points.txt
Pygments.egg-info/not-zip-safe
Pygments.egg-info/top_level.txt
docs/generate.py
@@ -57,6 +57,7 @@ docs/src/tokens.txt
docs/src/unicode.txt
external/markdown-processor.py
external/moin-parser.py
+external/pygments.bashcomp
external/rst-directive-old.py
external/rst-directive.py
pygments/__init__.py
@@ -133,27 +134,16 @@ scripts/vim2pygments.py
tests/old_run.py
tests/run.py
tests/support.py
-tests/support.pyc
tests/test_basic_api.py
-tests/test_basic_api.pyc
tests/test_clexer.py
-tests/test_clexer.pyc
tests/test_cmdline.py
-tests/test_cmdline.pyc
tests/test_examplefiles.py
-tests/test_examplefiles.pyc
tests/test_html_formatter.py
-tests/test_html_formatter.pyc
tests/test_latex_formatter.py
-tests/test_latex_formatter.pyc
tests/test_regexlexer.py
-tests/test_regexlexer.pyc
tests/test_token.py
-tests/test_token.pyc
tests/test_using_api.py
-tests/test_using_api.pyc
tests/test_util.py
-tests/test_util.pyc
tests/dtds/HTML4-f.dtd
tests/dtds/HTML4-s.dtd
tests/dtds/HTML4.dcl
@@ -164,19 +154,23 @@ tests/dtds/HTMLspec.ent
tests/dtds/HTMLsym.ent
tests/examplefiles/ANTLRv3.g
tests/examplefiles/AlternatingGroup.mu
+tests/examplefiles/CPDictionary.j
tests/examplefiles/Constants.mo
tests/examplefiles/DancingSudoku.lhs
tests/examplefiles/Errors.scala
tests/examplefiles/Intro.java
tests/examplefiles/Makefile
tests/examplefiles/Object.st
+tests/examplefiles/OrderedMap.hx
tests/examplefiles/RegexMatcher.ns2
tests/examplefiles/SmallCheck.hs
+tests/examplefiles/Sorting.mod
tests/examplefiles/Sudoku.lhs
tests/examplefiles/apache2.conf
tests/examplefiles/as3_test.as
tests/examplefiles/as3_test2.as
tests/examplefiles/as3_test3.as
+tests/examplefiles/aspx-cs_example
tests/examplefiles/badcase.java
tests/examplefiles/batchfile.bat
tests/examplefiles/boot-9.scm
@@ -186,12 +180,12 @@ tests/examplefiles/classes.dylan
tests/examplefiles/condensed_ruby.rb
tests/examplefiles/database.pytb
tests/examplefiles/de.MoinMoin.po
+tests/examplefiles/demo.cfm
tests/examplefiles/django_sample.html+django
tests/examplefiles/dwarf.cw
tests/examplefiles/erl_session
tests/examplefiles/escape_semicolon.clj
tests/examplefiles/evil_regex.js
-tests/examplefiles/example.aspx
tests/examplefiles/example.c
tests/examplefiles/example.cpp
tests/examplefiles/example.lua
@@ -202,7 +196,6 @@ tests/examplefiles/example.rhtml
tests/examplefiles/example.sh-session
tests/examplefiles/example.weechatlog
tests/examplefiles/example.xhtml
-tests/examplefiles/example.xml
tests/examplefiles/example.yaml
tests/examplefiles/example2.aspx
tests/examplefiles/firefox.mak
@@ -244,6 +237,7 @@ tests/examplefiles/pycon_test.pycon
tests/examplefiles/pytb_test2.pytb
tests/examplefiles/python25-bsd.mak
tests/examplefiles/qsort.prolog
+tests/examplefiles/r-console-transcript.Rout
tests/examplefiles/ragel-cpp_rlscan
tests/examplefiles/ragel-cpp_snippet
tests/examplefiles/regex.js
@@ -258,6 +252,7 @@ tests/examplefiles/sqlite3.sqlite3-console
tests/examplefiles/squid.conf
tests/examplefiles/string_delimiters.d
tests/examplefiles/test.R
+tests/examplefiles/test.adb
tests/examplefiles/test.asy
tests/examplefiles/test.bas
tests/examplefiles/test.boo
@@ -266,9 +261,11 @@ tests/examplefiles/test.css
tests/examplefiles/test.d
tests/examplefiles/test.erl
tests/examplefiles/test.evoque
+tests/examplefiles/test.flx
tests/examplefiles/test.html
tests/examplefiles/test.java
tests/examplefiles/test.jsp
+tests/examplefiles/test.mod
tests/examplefiles/test.moo
tests/examplefiles/test.myt
tests/examplefiles/test.pas
@@ -281,6 +278,8 @@ tests/examplefiles/test.tcsh
tests/examplefiles/test.xsl
tests/examplefiles/truncated.pytb
tests/examplefiles/type.lisp
+tests/examplefiles/underscore.coffee
tests/examplefiles/unicode.applescript
tests/examplefiles/while.pov
+tests/examplefiles/xml_example
tests/examplefiles/zmlrpc.f90 \ No newline at end of file
diff --git a/Pygments.egg-info/entry_points.txt b/Pygments.egg-info/entry_points.txt
new file mode 100644
index 0000000..756d801
--- /dev/null
+++ b/Pygments.egg-info/entry_points.txt
@@ -0,0 +1,3 @@
+[console_scripts]
+pygmentize = pygments.cmdline:main
+
diff --git a/docs/build/api.html b/docs/build/api.html
index 4cf4217..3ca8e31 100644
--- a/docs/build/api.html
+++ b/docs/build/api.html
@@ -453,6 +453,6 @@ or a tuple, it is returned as a list.</dd>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:10.979303
+<!-- generated on: 2010-03-01 21:18:22.940790
file id: api -->
</html> \ No newline at end of file
diff --git a/docs/build/authors.html b/docs/build/authors.html
index ae566e1..a2ce275 100644
--- a/docs/build/authors.html
+++ b/docs/build/authors.html
@@ -217,9 +217,11 @@ div.toc h2 {
<li>Kumar Appaiah -- Debian control lexer</li>
<li>Ali Afshar -- image formatter</li>
<li>Andreas Amann -- AppleScript lexer</li>
+<li>Jeremy Ashkenas -- CoffeeScript lexer</li>
<li>Stefan Matthias Aust -- Smalltalk lexer</li>
<li>Ben Bangert -- Mako lexers</li>
<li>Max Battcher -- Darcs patch lexer</li>
+<li>Paul Baumgart, 280 North, Inc. -- Objective-J lexer</li>
<li>Michael Bayer -- Myghty lexers</li>
<li>Jarrett Billingsley -- MiniD lexer</li>
<li>Adam Blinkinsop -- Haskell, Redcode lexers</li>
@@ -227,17 +229,21 @@ div.toc h2 {
<li>Pierre Bourdon -- bugfixes</li>
<li>Christopher Creutzig -- MuPAD lexer</li>
<li>Pete Curry -- bugfixes</li>
+<li>Owen Durni -- haXe lexer</li>
<li>Nick Efford -- Python 3 lexer</li>
<li>Artem Egorkine -- terminal256 formatter</li>
<li>Laurent Gautier -- R/S lexer</li>
<li>Krzysiek Goj -- Scala lexer</li>
<li>Matt Good -- Genshi, Cheetah lexers</li>
+<li>Patrick Gotthardt -- PHP namespaces support</li>
<li>Olivier Guibe -- Asymptote lexer</li>
<li>Matthew Harrison -- SVG formatter</li>
<li>Steven Hazel -- Tcl lexer</li>
<li>Aslak Hellesøy -- Gherkin lexer</li>
+<li>David Hess, Fish Software, Inc. -- Objective-J lexer</li>
<li>Varun Hiremath -- Debian control lexer</li>
<li>Dennis Kaarsemaker -- sources.list lexer</li>
+<li>Benjamin Kowarsch -- Modula-2 lexer</li>
<li>Marek Kubica -- Scheme lexer</li>
<li>Jochen Kupperschmidt -- Markdown processor</li>
<li>Gerd Kurzbach -- Modelica lexer</li>
@@ -246,7 +252,7 @@ div.toc h2 {
<li>Kirk McDonald -- D lexer</li>
<li>Lukas Meuser -- BBCode formatter, Lua lexer</li>
<li>Paulo Moura -- Logtalk lexer</li>
-<li>Ana Nelson -- Ragel, ANTLR lexers</li>
+<li>Ana Nelson -- Ragel, ANTLR, R console lexers</li>
<li>Nam T. Nguyen -- Monokai style</li>
<li>Jesper Noehr -- HTML formatter &quot;anchorlinenos&quot;</li>
<li>Jonas Obrist -- BBCode lexer</li>
@@ -259,13 +265,16 @@ div.toc h2 {
<li>Mario Ruggier -- Evoque lexers</li>
<li>Stou Sandalski -- NumPy, FORTRAN, tcsh and XSLT lexers</li>
<li>Matteo Sasso -- Common Lisp lexer</li>
+<li>Joe Schafer -- Ada lexer</li>
<li>Ken Schutte -- Matlab lexers</li>
<li>Tassilo Schweyer -- Io, MOOCode lexers</li>
<li>Joerg Sieker -- ABAP lexer</li>
<li>Kirill Simonov -- YAML lexer</li>
<li>Tiberius Teng -- default style overhaul</li>
<li>Jeremy Thurgood -- Erlang, Squid config lexers</li>
+<li>Erick Tryzelaar -- Felix lexer</li>
<li>Whitney Young -- ObjectiveC lexer</li>
+<li>Nathan Weizenbaum -- Haml and Sass lexers</li>
<li>Dietmar Winkler -- Modelica lexer</li>
<li>Nils Winter -- Smalltalk lexer</li>
<li>Davy Wybiral -- Clojure lexer</li>
@@ -274,6 +283,6 @@ div.toc h2 {
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:11.616011
+<!-- generated on: 2010-03-01 21:18:22.057737
file id: authors -->
</html> \ No newline at end of file
diff --git a/docs/build/changelog.html b/docs/build/changelog.html
index 6e38a8b..0177758 100644
--- a/docs/build/changelog.html
+++ b/docs/build/changelog.html
@@ -213,6 +213,8 @@ div.toc h2 {
<h2>Contents</h2>
<ul class="contents">
+ <li><a href="#version-1-3">Version 1.3</a></li>
+
<li><a href="#version-1-2-2">Version 1.2.2</a></li>
<li><a href="#version-1-2-1">Version 1.2.1</a></li>
@@ -251,6 +253,36 @@ div.toc h2 {
</div>
<p>Issue numbers refer to the tracker at <a class="reference external" href="http://dev.pocoo.org/projects/pygments/">http://dev.pocoo.org/projects/pygments/</a>.</p>
+<div class="section" id="version-1-3">
+<h3>Version 1.3</h3>
+<p>(codename Schneeglöckchen, released Mar 01, 2010)</p>
+<ul class="simple">
+<li>Added the <tt class="docutils literal">ensurenl</tt> lexer option, which can be used to suppress the
+automatic addition of a newline to the lexer input.</li>
+<li>Lexers added:<ul>
+<li>Ada</li>
+<li>Coldfusion</li>
+<li>Modula-2</li>
+<li>haXe</li>
+<li>R console</li>
+<li>Objective-J</li>
+<li>Haml and Sass</li>
+<li>CoffeeScript</li>
+</ul>
+</li>
+<li>Enhanced reStructuredText highlighting.</li>
+<li>Added support for PHP 5.3 namespaces in the PHP lexer.</li>
+<li>Added a bash completion script for <cite>pygmentize</cite>, to the external/
+directory (#466).</li>
+<li>Fixed a bug in <cite>do_insertions()</cite> used for multi-lexer languages.</li>
+<li>Fixed a Ruby regex highlighting bug (#476).</li>
+<li>Fixed regex highlighting bugs in Perl lexer (#258).</li>
+<li>Add small enhancements to the C lexer (#467) and Bash lexer (#469).</li>
+<li>Small fixes for the Tcl, Debian control file, Nginx config,
+Smalltalk, Objective-C, Clojure, Lua lexers.</li>
+<li>Gherkin lexer: Fixed single apostrophe bug and added new i18n keywords.</li>
+</ul>
+</div>
<div class="section" id="version-1-2-2">
<h3>Version 1.2.2</h3>
<p>(bugfix release, released Jan 02, 2010)</p>
@@ -278,7 +310,10 @@ console traceback, resulting in clobbered output.</li>
<li>Go</li>
<li>Gherkin (Cucumber)</li>
<li>CMake</li>
-<li>OOC</li>
+<li>Ooc</li>
+<li>Coldfusion</li>
+<li>haXe</li>
+<li>R console</li>
</ul>
</li>
<li>Added options for rendering LaTeX in source code comments in the
@@ -702,6 +737,6 @@ continuations.</li>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:11.743625
+<!-- generated on: 2010-03-01 21:18:21.373497
file id: changelog -->
</html> \ No newline at end of file
diff --git a/docs/build/cmdline.html b/docs/build/cmdline.html
index e382e6d..6db3dd6 100644
--- a/docs/build/cmdline.html
+++ b/docs/build/cmdline.html
@@ -348,6 +348,6 @@ formatter is the terminal encoding (<cite>sys.stdout.encoding</cite>).</li>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:12.317732
+<!-- generated on: 2010-03-01 21:18:20.444981
file id: cmdline -->
</html> \ No newline at end of file
diff --git a/docs/build/filterdevelopment.html b/docs/build/filterdevelopment.html
index 97ba876..f6de0c3 100644
--- a/docs/build/filterdevelopment.html
+++ b/docs/build/filterdevelopment.html
@@ -277,6 +277,6 @@ decorated function for filtering.</p>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:12.447556
+<!-- generated on: 2010-03-01 21:18:23.568890
file id: filterdevelopment -->
</html> \ No newline at end of file
diff --git a/docs/build/filters.html b/docs/build/filters.html
index a05e4b0..e370f46 100644
--- a/docs/build/filters.html
+++ b/docs/build/filters.html
@@ -407,6 +407,6 @@ code to your styleguide.</p>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:12.534561
+<!-- generated on: 2010-03-01 21:18:21.142721
file id: filters -->
</html> \ No newline at end of file
diff --git a/docs/build/formatterdevelopment.html b/docs/build/formatterdevelopment.html
index cc4e9cb..8a27246 100644
--- a/docs/build/formatterdevelopment.html
+++ b/docs/build/formatterdevelopment.html
@@ -369,6 +369,6 @@ is up to the formatter) and has to return a string or <tt class="docutils litera
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:12.749801
+<!-- generated on: 2010-03-01 21:18:23.141347
file id: formatterdevelopment -->
</html> \ No newline at end of file
diff --git a/docs/build/formatters.html b/docs/build/formatters.html
index 478ee79..b51578d 100644
--- a/docs/build/formatters.html
+++ b/docs/build/formatters.html
@@ -903,6 +903,6 @@ no support for common styles.</p>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:12.903585
+<!-- generated on: 2010-03-01 21:18:22.349606
file id: formatters -->
</html> \ No newline at end of file
diff --git a/docs/build/index.html b/docs/build/index.html
index 4a19e0c..ec386e8 100644
--- a/docs/build/index.html
+++ b/docs/build/index.html
@@ -256,6 +256,6 @@ look <a class="reference external" href="http://pygments.org/contribute/">here</
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:13.756054
+<!-- generated on: 2010-03-01 21:18:22.245954
file id: index -->
</html> \ No newline at end of file
diff --git a/docs/build/installation.html b/docs/build/installation.html
index 815263c..e7ef9c4 100644
--- a/docs/build/installation.html
+++ b/docs/build/installation.html
@@ -276,6 +276,6 @@ is run, the sources are updated from Subversion. -->
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:13.891713
+<!-- generated on: 2010-03-01 21:18:16.987157
file id: installation -->
</html> \ No newline at end of file
diff --git a/docs/build/integrate.html b/docs/build/integrate.html
index 903d761..06773ec 100644
--- a/docs/build/integrate.html
+++ b/docs/build/integrate.html
@@ -221,6 +221,8 @@ div.toc h2 {
<li><a href="#textmate">TextMate</a></li>
+ <li><a href="#bash-completion">Bash completion</a></li>
+
</ul>
</div>
@@ -247,9 +249,14 @@ You can copy and adapt it to your liking.</p>
<p>Antonio Cangiano has created a Pygments bundle for TextMate that allows to
colorize code via a simple menu option. It can be found <a class="reference external" href="http://antoniocangiano.com/2008/10/28/pygments-textmate-bundle/">here</a>.</p>
</div>
+<div class="section" id="bash-completion">
+<h3>Bash completion</h3>
+<p>The source distribution contains a file <tt class="docutils literal">external/pygments.bashcomp</tt> that
+sets up completion for the <tt class="docutils literal">pygmentize</tt> command in bash.</p>
+</div>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:14.068980
+<!-- generated on: 2010-03-01 21:18:22.168270
file id: integrate -->
</html> \ No newline at end of file
diff --git a/docs/build/lexerdevelopment.html b/docs/build/lexerdevelopment.html
index 7602ec3..c7d5207 100644
--- a/docs/build/lexerdevelopment.html
+++ b/docs/build/lexerdevelopment.html
@@ -686,6 +686,6 @@ the <tt class="docutils literal">get_tokens_unprocessed()</tt> method. The follo
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:14.208796
+<!-- generated on: 2010-03-01 21:18:20.659342
file id: lexerdevelopment -->
</html> \ No newline at end of file
diff --git a/docs/build/lexers.html b/docs/build/lexers.html
index 70627a1..84c55e8 100644
--- a/docs/build/lexers.html
+++ b/docs/build/lexers.html
@@ -442,7 +442,7 @@ language) source.</p>
<tbody valign="top">
<tr class="field"><th class="field-name">Short names:</th><td class="field-body">python, py</td>
</tr>
-<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.py, *.pyw, *.sc, SConstruct, SConscript</td>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.py, *.pyw, *.sc, SConstruct, SConscript, *.tac</td>
</tr>
<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">text/x-python, application/x-python</td>
</tr>
@@ -639,6 +639,23 @@ language) source.</p>
</div>
<div class="section" id="lexers-for-compiled-languages">
<h3>Lexers for compiled languages</h3>
+<p><cite>AdaLexer</cite></p>
+<blockquote>
+<p>For Ada source code.</p>
+<p><em>New in Pygments 1.3.</em></p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">ada, ada95ada2005</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.adb, *.ads, *.ada</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">text/x-ada</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
<p><cite>CLexer</cite></p>
<blockquote>
<p>For C source code with preprocessor directives.</p>
@@ -752,6 +769,23 @@ Default is to consider all of them builtin.</dd>
</tbody>
</table>
</blockquote>
+<p><cite>FelixLexer</cite></p>
+<blockquote>
+<p>For <a class="reference external" href="http://www.felix-lang.org">Felix</a> source code.</p>
+<p><em>New in Pygments 1.2.</em></p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">felix, flx</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.flx, *.flxh</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">text/x-felix</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
<p><cite>FortranLexer</cite></p>
<blockquote>
<p>Lexer for FORTRAN 90 code.</p>
@@ -818,6 +852,34 @@ Default is to consider all of them builtin.</dd>
</tbody>
</table>
</blockquote>
+<p><cite>Modula2Lexer</cite></p>
+<blockquote>
+<p>For <a class="reference external" href="http://www.modula2.org/">Modula-2</a> source code.</p>
+<p>Additional options that determine which keywords are highlighted:</p>
+<dl class="docutils">
+<dt><cite>pim</cite></dt>
+<dd>Select PIM Modula-2 dialect (default: True).</dd>
+<dt><cite>iso</cite></dt>
+<dd>Select ISO Modula-2 dialect (default: False).</dd>
+<dt><cite>objm2</cite></dt>
+<dd>Select Objective Modula-2 dialect (default: False).</dd>
+<dt><cite>gm2ext</cite></dt>
+<dd>Also highlight GNU extensions (default: False).</dd>
+</dl>
+<p><em>New in Pygments 1.3.</em></p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">modula2, m2</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.def, *.mod</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">text/x-modula2</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
<p><cite>ObjectiveCLexer</cite></p>
<blockquote>
<p>For Objective-C source code with preprocessor directives.</p>
@@ -1214,6 +1276,22 @@ Contributed by Christopher Creutzig &lt;<a class="reference external" href="mail
</tbody>
</table>
</blockquote>
+<p><cite>RConsoleLexer</cite></p>
+<blockquote>
+<p>For R console transcripts or R CMD BATCH output files.</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">rconsole, rout</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.Rout</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">None</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
<p><cite>SLexer</cite></p>
<blockquote>
<p>For S, S-plus, and R source code.</p>
@@ -1277,7 +1355,7 @@ Contributed by Andreas Amann &lt;<a class="reference external" href="mailto:aama
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
-<tr class="field"><th class="field-name">Short names:</th><td class="field-body">asy</td>
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">asy, asymptote</td>
</tr>
<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.asy</td>
</tr>
@@ -1288,15 +1366,15 @@ Contributed by Andreas Amann &lt;<a class="reference external" href="mailto:aama
</blockquote>
<p><cite>BashLexer</cite></p>
<blockquote>
-<p>Lexer for (ba)sh shell scripts.</p>
+<p>Lexer for (ba|k|)sh shell scripts.</p>
<p><em>New in Pygments 0.6.</em></p>
<table class="docutils field-list" frame="void" rules="none">
<col class="field-name" />
<col class="field-body" />
<tbody valign="top">
-<tr class="field"><th class="field-name">Short names:</th><td class="field-body">bash, sh</td>
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">bash, sh, ksh</td>
</tr>
-<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.sh, *.ebuild, *.eclass</td>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.sh, *.ksh, *.bash, *.ebuild, *.eclass</td>
</tr>
<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">application/x-sh, application/x-shellscript</td>
</tr>
@@ -2025,6 +2103,38 @@ with the <cite>XmlLexer</cite>.</p>
</tbody>
</table>
</blockquote>
+<p><cite>ColdfusionHtmlLexer</cite></p>
+<blockquote>
+<p>Coldfusion markup in html</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">cfm</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.cfm, *.cfml, *.cfc</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">application/x-coldfusion</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
+<p><cite>ColdfusionLexer</cite></p>
+<blockquote>
+<p>Coldfusion statements</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">cfs</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">None</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">None</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
<p><cite>CssDjangoLexer</cite></p>
<blockquote>
<p>Subclass of the <cite>DjangoLexer</cite> that highlights unlexed data with the
@@ -3093,6 +3203,23 @@ language.</p>
</tbody>
</table>
</blockquote>
+<p><cite>CoffeeScriptLexer</cite></p>
+<blockquote>
+<p>For <a class="reference external" href="http://jashkenas.github.com/coffee-script/">CoffeeScript</a> source code.</p>
+<p><em>New in Pygments 1.3.</em></p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">coffee-script, coffeescript</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.coffee</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">text/coffeescript</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
<p><cite>CssLexer</cite></p>
<blockquote>
<p>For CSS (Cascading Style Sheets).</p>
@@ -3109,6 +3236,39 @@ language.</p>
</tbody>
</table>
</blockquote>
+<p><cite>HamlLexer</cite></p>
+<blockquote>
+<p>For Haml markup.</p>
+<p><em>New in Pygments 1.3.</em></p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">haml, HAML</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.haml</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">text/x-haml</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
+<p><cite>HaxeLexer</cite></p>
+<blockquote>
+<p>For haXe source code (<a class="reference external" href="http://haxe.org/">http://haxe.org/</a>).</p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">hx, haXe</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.hx</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">text/haxe</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
<p><cite>HtmlLexer</cite></p>
<blockquote>
<p>For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
@@ -3159,6 +3319,23 @@ Nested AS3 in &lt;script&gt; tags is highlighted by the appropriate lexer.</p>
</tbody>
</table>
</blockquote>
+<p><cite>ObjectiveJLexer</cite></p>
+<blockquote>
+<p>For Objective-J source code with preprocessor directives.</p>
+<p><em>New in Pygments 1.3.</em></p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">objective-j, objectivej, obj-j, objj</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.j</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">text/x-objective-j</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
<p><cite>PhpLexer</cite></p>
<blockquote>
<p>For <a class="reference external" href="http://www.php.net/">PHP</a> source code.
@@ -3200,6 +3377,23 @@ the php documentation.</p>
</tbody>
</table>
</blockquote>
+<p><cite>SassLexer</cite></p>
+<blockquote>
+<p>For Sass stylesheets.</p>
+<p><em>New in Pygments 1.3.</em></p>
+<table class="docutils field-list" frame="void" rules="none">
+<col class="field-name" />
+<col class="field-body" />
+<tbody valign="top">
+<tr class="field"><th class="field-name">Short names:</th><td class="field-body">sass, SASS</td>
+</tr>
+<tr class="field"><th class="field-name">Filename patterns:</th><td class="field-body">*.sass</td>
+</tr>
+<tr class="field"><th class="field-name">Mimetypes:</th><td class="field-body">text/x-sass</td>
+</tr>
+</tbody>
+</table>
+</blockquote>
<p><cite>XmlLexer</cite></p>
<blockquote>
<p>Generic lexer for XML (eXtensible Markup Language).</p>
@@ -3255,6 +3449,6 @@ in the form <tt class="docutils literal">(name, aliases, filetypes, mimetypes)</
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:14.909677
+<!-- generated on: 2010-03-01 21:18:17.323492
file id: lexers -->
</html> \ No newline at end of file
diff --git a/docs/build/moinmoin.html b/docs/build/moinmoin.html
index 007e125..6f3e3c2 100644
--- a/docs/build/moinmoin.html
+++ b/docs/build/moinmoin.html
@@ -240,6 +240,6 @@ can set the <tt class="docutils literal">INLINESTYLES</tt> option to True.</p>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:19.009047
+<!-- generated on: 2010-03-01 21:18:20.345639
file id: moinmoin -->
</html> \ No newline at end of file
diff --git a/docs/build/plugins.html b/docs/build/plugins.html
index ed7484b..95641ad 100644
--- a/docs/build/plugins.html
+++ b/docs/build/plugins.html
@@ -289,6 +289,6 @@ distribution.</p>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:19.167617
+<!-- generated on: 2010-03-01 21:18:20.160548
file id: plugins -->
</html> \ No newline at end of file
diff --git a/docs/build/quickstart.html b/docs/build/quickstart.html
index 64cc0f0..92e3a75 100644
--- a/docs/build/quickstart.html
+++ b/docs/build/quickstart.html
@@ -385,6 +385,6 @@ $ pygmentize -S default -f html &gt; style.css
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:19.352414
+<!-- generated on: 2010-03-01 21:18:21.849281
file id: quickstart -->
</html> \ No newline at end of file
diff --git a/docs/build/rstdirective.html b/docs/build/rstdirective.html
index d7edf6b..bbec08f 100644
--- a/docs/build/rstdirective.html
+++ b/docs/build/rstdirective.html
@@ -224,6 +224,6 @@ if the `handlecodeblocks` option is true. -->
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:19.532040
+<!-- generated on: 2010-03-01 21:18:21.337328
file id: rstdirective -->
</html> \ No newline at end of file
diff --git a/docs/build/styles.html b/docs/build/styles.html
index 059f90c..ee224ef 100644
--- a/docs/build/styles.html
+++ b/docs/build/styles.html
@@ -336,6 +336,6 @@ a way to iterate over all styles:</p>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:19.589769
+<!-- generated on: 2010-03-01 21:18:23.630305
file id: styles -->
</html> \ No newline at end of file
diff --git a/docs/build/tokens.html b/docs/build/tokens.html
index a8c0483..4290e09 100644
--- a/docs/build/tokens.html
+++ b/docs/build/tokens.html
@@ -536,6 +536,6 @@ highlight a programming language but a patch file.</p>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:19.766835
+<!-- generated on: 2010-03-01 21:18:23.269356
file id: tokens -->
</html> \ No newline at end of file
diff --git a/docs/build/unicode.html b/docs/build/unicode.html
index 0cb8f52..7a09e8b 100644
--- a/docs/build/unicode.html
+++ b/docs/build/unicode.html
@@ -244,6 +244,6 @@ input and output encodings.</p>
</div>
</body>
-<!-- generated on: 2010-01-02 22:27:20.165454
+<!-- generated on: 2010-03-01 21:18:22.880164
file id: unicode -->
</html> \ No newline at end of file
diff --git a/docs/src/integrate.txt b/docs/src/integrate.txt
index fb3fa5a..51a3dac 100644
--- a/docs/src/integrate.txt
+++ b/docs/src/integrate.txt
@@ -35,3 +35,9 @@ Antonio Cangiano has created a Pygments bundle for TextMate that allows to
colorize code via a simple menu option. It can be found here_.
.. _here: http://antoniocangiano.com/2008/10/28/pygments-textmate-bundle/
+
+Bash completion
+---------------
+
+The source distribution contains a file ``external/pygments.bashcomp`` that
+sets up completion for the ``pygmentize`` command in bash.
diff --git a/external/pygments.bashcomp b/external/pygments.bashcomp
new file mode 100644
index 0000000..1299fdb
--- /dev/null
+++ b/external/pygments.bashcomp
@@ -0,0 +1,38 @@
+#!bash
+#
+# Bash completion support for Pygments (the 'pygmentize' command).
+#
+
+_pygmentize()
+{
+ local cur prev
+
+ COMPREPLY=()
+ cur=`_get_cword`
+ prev=${COMP_WORDS[COMP_CWORD-1]}
+
+ case "$prev" in
+ -f)
+ FORMATTERS=`pygmentize -L formatters | grep '* ' | cut -c3- | sed -e 's/,//g' -e 's/:$//'`
+ COMPREPLY=( $( compgen -W '$FORMATTERS' -- "$cur" ) )
+ return 0
+ ;;
+ -l)
+ LEXERS=`pygmentize -L lexers | grep '* ' | cut -c3- | sed -e 's/,//g' -e 's/:$//'`
+ COMPREPLY=( $( compgen -W '$LEXERS' -- "$cur" ) )
+ return 0
+ ;;
+ -S)
+ STYLES=`pygmentize -L styles | grep '* ' | cut -c3- | sed s/:$//`
+ COMPREPLY=( $( compgen -W '$STYLES' -- "$cur" ) )
+ return 0
+ ;;
+ esac
+
+ if [[ "$cur" == -* ]]; then
+ COMPREPLY=( $( compgen -W '-f -l -S -L -g -O -P -F \
+ -N -H -h -V -o' -- "$cur" ) )
+ return 0
+ fi
+}
+complete -F _pygmentize -o default pygmentize
diff --git a/pygmentize b/pygmentize
deleted file mode 100755
index e237919..0000000
--- a/pygmentize
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python
-
-import sys, pygments.cmdline
-try:
- sys.exit(pygments.cmdline.main(sys.argv))
-except KeyboardInterrupt:
- sys.exit(1)
diff --git a/pygments/__init__.py b/pygments/__init__.py
index ff17af4..823862e 100644
--- a/pygments/__init__.py
+++ b/pygments/__init__.py
@@ -26,13 +26,13 @@
:license: BSD, see LICENSE for details.
"""
-__version__ = '1.2.2'
+__version__ = '1.3'
__docformat__ = 'restructuredtext'
__all__ = ['lex', 'format', 'highlight']
-import sys, os
+import sys
from pygments.util import StringIO, BytesIO
diff --git a/pygments/filters/__init__.py b/pygments/filters/__init__.py
index 382933b..504c3e1 100644
--- a/pygments/filters/__init__.py
+++ b/pygments/filters/__init__.py
@@ -9,17 +9,14 @@
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-try:
- set
-except NameError:
- from sets import Set as set
import re
+
from pygments.token import String, Comment, Keyword, Name, Error, Whitespace, \
string_to_tokentype
from pygments.filter import Filter
-from pygments.util import get_list_opt, get_int_opt, get_bool_opt, get_choice_opt, \
- ClassNotFound, OptionError
+from pygments.util import get_list_opt, get_int_opt, get_bool_opt, \
+ get_choice_opt, ClassNotFound, OptionError
from pygments.plugin import find_plugin_filters
diff --git a/pygments/formatters/__init__.py b/pygments/formatters/__init__.py
index e5802b9..0e02a52 100644
--- a/pygments/formatters/__init__.py
+++ b/pygments/formatters/__init__.py
@@ -13,7 +13,7 @@ import fnmatch
from pygments.formatters._mapping import FORMATTERS
from pygments.plugin import find_plugin_formatters
-from pygments.util import docstring_headline, ClassNotFound
+from pygments.util import ClassNotFound
ns = globals()
for fcls in FORMATTERS:
diff --git a/pygments/formatters/html.py b/pygments/formatters/html.py
index 96cde3f..5c0972e 100644
--- a/pygments/formatters/html.py
+++ b/pygments/formatters/html.py
@@ -8,13 +8,10 @@
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-import sys, os
-import StringIO
-try:
- set
-except NameError:
- from sets import Set as set
+import os
+import sys
+import StringIO
from pygments.formatter import Formatter
from pygments.token import Token, Text, STANDARD_TYPES
diff --git a/pygments/lexer.py b/pygments/lexer.py
index f28627b..fbcc39a 100644
--- a/pygments/lexer.py
+++ b/pygments/lexer.py
@@ -10,11 +10,6 @@
"""
import re
-try:
- set
-except NameError:
- from sets import Set as set
-
from pygments.filter import apply_filters, Filter
from pygments.filters import get_filter_by_name
from pygments.token import Error, Text, Other, _TokenType
@@ -23,7 +18,7 @@ from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
- 'LexerContext', 'include', 'flags', 'bygroups', 'using', 'this']
+ 'LexerContext', 'include', 'bygroups', 'using', 'this']
_default_analyse = staticmethod(lambda x: 0.0)
@@ -51,6 +46,10 @@ class Lexer(object):
``stripall``
Strip all leading and trailing whitespace from the input
(default: False).
+ ``ensurenl``
+ Make sure that the input ends with a newline (default: True). This
+ is required for some lexers that consume input linewise.
+ *New in Pygments 1.3.*
``tabsize``
If given and greater than 0, expand tabs in the input (default: 0).
``encoding``
@@ -82,6 +81,7 @@ class Lexer(object):
self.options = options
self.stripnl = get_bool_opt(options, 'stripnl', True)
self.stripall = get_bool_opt(options, 'stripall', False)
+ self.ensurenl = get_bool_opt(options, 'ensurenl', True)
self.tabsize = get_int_opt(options, 'tabsize', 0)
self.encoding = options.get('encoding', 'latin1')
# self.encoding = options.get('inencoding', None) or self.encoding
@@ -155,7 +155,7 @@ class Lexer(object):
text = text.strip('\n')
if self.tabsize > 0:
text = text.expandtabs(self.tabsize)
- if not text.endswith('\n'):
+ if self.ensurenl and not text.endswith('\n'):
text += '\n'
def streamer():
@@ -646,9 +646,15 @@ def do_insertions(insertions, tokens):
realpos += len(v) - oldi
# leftover tokens
- if insleft:
+ while insleft:
# no normal tokens, set realpos to zero
realpos = realpos or 0
for p, t, v in itokens:
yield realpos, t, v
realpos += len(v)
+ try:
+ index, itokens = insertions.next()
+ except StopIteration:
+ insleft = False
+ break # not strictly necessary
+
diff --git a/pygments/lexers/__init__.py b/pygments/lexers/__init__.py
index c1890a9..cce7e9b 100644
--- a/pygments/lexers/__init__.py
+++ b/pygments/lexers/__init__.py
@@ -8,16 +8,12 @@
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
import sys
-import fnmatch
import types
+import fnmatch
from os.path import basename
-try:
- set
-except NameError:
- from sets import Set as set
-
from pygments.lexers._mapping import LEXERS
from pygments.plugin import find_plugin_lexers
from pygments.util import ClassNotFound, bytes
@@ -223,7 +219,6 @@ class _automodule(types.ModuleType):
raise AttributeError(name)
-import sys
oldmod = sys.modules['pygments.lexers']
newmod = _automodule('pygments.lexers')
newmod.__dict__.update(oldmod.__dict__)
diff --git a/pygments/lexers/_mapping.py b/pygments/lexers/_mapping.py
index 7e06831..cdaf56a 100644
--- a/pygments/lexers/_mapping.py
+++ b/pygments/lexers/_mapping.py
@@ -17,6 +17,7 @@ LEXERS = {
'ABAPLexer': ('pygments.lexers.other', 'ABAP', ('abap',), ('*.abap',), ('text/x-abap',)),
'ActionScript3Lexer': ('pygments.lexers.web', 'ActionScript 3', ('as3', 'actionscript3'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
'ActionScriptLexer': ('pygments.lexers.web', 'ActionScript', ('as', 'actionscript'), ('*.as',), ('application/x-actionscript', 'text/x-actionscript', 'text/actionscript')),
+ 'AdaLexer': ('pygments.lexers.compiled', 'Ada', ('ada', 'ada95ada2005'), ('*.adb', '*.ads', '*.ada'), ('text/x-ada',)),
'AntlrActionScriptLexer': ('pygments.lexers.parsers', 'ANTLR With ActionScript Target', ('antlr-as', 'antlr-actionscript'), ('*.G', '*.g'), ()),
'AntlrCSharpLexer': ('pygments.lexers.parsers', 'ANTLR With C# Target', ('antlr-csharp', 'antlr-c#'), ('*.G', '*.g'), ()),
'AntlrCppLexer': ('pygments.lexers.parsers', 'ANTLR With CPP Target', ('antlr-cpp',), ('*.G', '*.g'), ()),
@@ -28,10 +29,10 @@ LEXERS = {
'AntlrRubyLexer': ('pygments.lexers.parsers', 'ANTLR With Ruby Target', ('antlr-ruby', 'antlr-rb'), ('*.G', '*.g'), ()),
'ApacheConfLexer': ('pygments.lexers.text', 'ApacheConf', ('apacheconf', 'aconf', 'apache'), ('.htaccess', 'apache.conf', 'apache2.conf'), ('text/x-apacheconf',)),
'AppleScriptLexer': ('pygments.lexers.other', 'AppleScript', ('applescript',), ('*.applescript',), ()),
- 'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy',), ('*.asy',), ('text/x-asymptote',)),
+ 'AsymptoteLexer': ('pygments.lexers.other', 'Asymptote', ('asy', 'asymptote'), ('*.asy',), ('text/x-asymptote',)),
'BBCodeLexer': ('pygments.lexers.text', 'BBCode', ('bbcode',), (), ('text/x-bbcode',)),
'BaseMakefileLexer': ('pygments.lexers.text', 'Makefile', ('basemake',), (), ()),
- 'BashLexer': ('pygments.lexers.other', 'Bash', ('bash', 'sh'), ('*.sh', '*.ebuild', '*.eclass'), ('application/x-sh', 'application/x-shellscript')),
+ 'BashLexer': ('pygments.lexers.other', 'Bash', ('bash', 'sh', 'ksh'), ('*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass'), ('application/x-sh', 'application/x-shellscript')),
'BashSessionLexer': ('pygments.lexers.other', 'Bash Session', ('console',), ('*.sh-session',), ('application/x-shell-session',)),
'BatchLexer': ('pygments.lexers.other', 'Batchfile', ('bat',), ('*.bat', '*.cmd'), ('application/x-dos-batch',)),
'BefungeLexer': ('pygments.lexers.other', 'Befunge', ('befunge',), ('*.befunge',), ('application/x-befunge',)),
@@ -47,6 +48,9 @@ LEXERS = {
'CheetahLexer': ('pygments.lexers.templates', 'Cheetah', ('cheetah', 'spitfire'), ('*.tmpl', '*.spt'), ('application/x-cheetah', 'application/x-spitfire')),
'CheetahXmlLexer': ('pygments.lexers.templates', 'XML+Cheetah', ('xml+cheetah', 'xml+spitfire'), (), ('application/xml+cheetah', 'application/xml+spitfire')),
'ClojureLexer': ('pygments.lexers.agile', 'Clojure', ('clojure', 'clj'), ('*.clj',), ('text/x-clojure', 'application/x-clojure')),
+ 'CoffeeScriptLexer': ('pygments.lexers.web', 'CoffeeScript', ('coffee-script', 'coffeescript'), ('*.coffee',), ('text/coffeescript',)),
+ 'ColdfusionHtmlLexer': ('pygments.lexers.templates', 'Coldufsion HTML', ('cfm',), ('*.cfm', '*.cfml', '*.cfc'), ('application/x-coldfusion',)),
+ 'ColdfusionLexer': ('pygments.lexers.templates', 'cfstatement', ('cfs',), (), ()),
'CommonLispLexer': ('pygments.lexers.functional', 'Common Lisp', ('common-lisp', 'cl'), ('*.cl', '*.lisp', '*.el'), ('text/x-common-lisp',)),
'CppLexer': ('pygments.lexers.compiled', 'C++', ('cpp', 'c++'), ('*.cpp', '*.hpp', '*.c++', '*.h++', '*.cc', '*.hh', '*.cxx', '*.hxx'), ('text/x-c++hdr', 'text/x-c++src')),
'CppObjdumpLexer': ('pygments.lexers.asm', 'cpp-objdump', ('cpp-objdump', 'c++-objdumb', 'cxx-objdump'), ('*.cpp-objdump', '*.c++-objdump', '*.cxx-objdump'), ('text/x-cpp-objdump',)),
@@ -71,6 +75,7 @@ LEXERS = {
'EvoqueHtmlLexer': ('pygments.lexers.templates', 'HTML+Evoque', ('html+evoque',), ('*.html',), ('text/html+evoque',)),
'EvoqueLexer': ('pygments.lexers.templates', 'Evoque', ('evoque',), ('*.evoque',), ('application/x-evoque',)),
'EvoqueXmlLexer': ('pygments.lexers.templates', 'XML+Evoque', ('xml+evoque',), ('*.xml',), ('application/xml+evoque',)),
+ 'FelixLexer': ('pygments.lexers.compiled', 'Felix', ('felix', 'flx'), ('*.flx', '*.flxh'), ('text/x-felix',)),
'FortranLexer': ('pygments.lexers.compiled', 'Fortran', ('fortran',), ('*.f', '*.f90'), ('text/x-fortran',)),
'GLShaderLexer': ('pygments.lexers.compiled', 'GLSL', ('glsl',), ('*.vert', '*.frag', '*.geo'), ('text/x-glslsrc',)),
'GasLexer': ('pygments.lexers.asm', 'GAS', ('gas',), ('*.s', '*.S'), ('text/x-gas',)),
@@ -81,7 +86,9 @@ LEXERS = {
'GnuplotLexer': ('pygments.lexers.other', 'Gnuplot', ('gnuplot',), ('*.plot', '*.plt'), ('text/x-gnuplot',)),
'GoLexer': ('pygments.lexers.compiled', 'Go', ('go',), ('*.go',), ('text/x-gosrc',)),
'GroffLexer': ('pygments.lexers.text', 'Groff', ('groff', 'nroff', 'man'), ('*.[1234567]', '*.man'), ('application/x-troff', 'text/troff')),
+ 'HamlLexer': ('pygments.lexers.web', 'Haml', ('haml', 'HAML'), ('*.haml',), ('text/x-haml',)),
'HaskellLexer': ('pygments.lexers.functional', 'Haskell', ('haskell', 'hs'), ('*.hs',), ('text/x-haskell',)),
+ 'HaxeLexer': ('pygments.lexers.web', 'haXe', ('hx', 'haXe'), ('*.hx',), ('text/haxe',)),
'HtmlDjangoLexer': ('pygments.lexers.templates', 'HTML+Django/Jinja', ('html+django', 'html+jinja'), (), ('text/html+django', 'text/html+jinja')),
'HtmlGenshiLexer': ('pygments.lexers.templates', 'HTML+Genshi', ('html+genshi', 'html+kid'), (), ('text/html+genshi',)),
'HtmlLexer': ('pygments.lexers.web', 'HTML', ('html',), ('*.html', '*.htm', '*.xhtml', '*.xslt'), ('text/html', 'application/xhtml+xml')),
@@ -114,6 +121,7 @@ LEXERS = {
'MatlabSessionLexer': ('pygments.lexers.math', 'Matlab session', ('matlabsession',), (), ()),
'MiniDLexer': ('pygments.lexers.agile', 'MiniD', ('minid',), ('*.md',), ('text/x-minidsrc',)),
'ModelicaLexer': ('pygments.lexers.other', 'Modelica', ('modelica',), ('*.mo',), ('text/x-modelica',)),
+ 'Modula2Lexer': ('pygments.lexers.compiled', 'Modula-2', ('modula2', 'm2'), ('*.def', '*.mod'), ('text/x-modula2',)),
'MoinWikiLexer': ('pygments.lexers.text', 'MoinMoin/Trac Wiki markup', ('trac-wiki', 'moin'), (), ('text/x-trac-wiki',)),
'MuPADLexer': ('pygments.lexers.math', 'MuPAD', ('mupad',), ('*.mu',), ()),
'MxmlLexer': ('pygments.lexers.web', 'MXML', ('mxml',), ('*.mxml',), ()),
@@ -129,6 +137,7 @@ LEXERS = {
'NumPyLexer': ('pygments.lexers.math', 'NumPy', ('numpy',), (), ()),
'ObjdumpLexer': ('pygments.lexers.asm', 'objdump', ('objdump',), ('*.objdump',), ('text/x-objdump',)),
'ObjectiveCLexer': ('pygments.lexers.compiled', 'Objective-C', ('objective-c', 'objectivec', 'obj-c', 'objc'), ('*.m',), ('text/x-objective-c',)),
+ 'ObjectiveJLexer': ('pygments.lexers.web', 'Objective-J', ('objective-j', 'objectivej', 'obj-j', 'objj'), ('*.j',), ('text/x-objective-j',)),
'OcamlLexer': ('pygments.lexers.compiled', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OcamlLexer': ('pygments.lexers.functional', 'OCaml', ('ocaml',), ('*.ml', '*.mli', '*.mll', '*.mly'), ('text/x-ocaml',)),
'OocLexer': ('pygments.lexers.compiled', 'Ooc', ('ooc',), ('*.ooc',), ('text/x-ooc',)),
@@ -139,8 +148,9 @@ LEXERS = {
'Python3Lexer': ('pygments.lexers.agile', 'Python 3', ('python3', 'py3'), (), ('text/x-python3', 'application/x-python3')),
'Python3TracebackLexer': ('pygments.lexers.agile', 'Python 3.0 Traceback', ('py3tb',), ('*.py3tb',), ('text/x-python3-traceback',)),
'PythonConsoleLexer': ('pygments.lexers.agile', 'Python console session', ('pycon',), (), ('text/x-python-doctest',)),
- 'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript'), ('text/x-python', 'application/x-python')),
+ 'PythonLexer': ('pygments.lexers.agile', 'Python', ('python', 'py'), ('*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac'), ('text/x-python', 'application/x-python')),
'PythonTracebackLexer': ('pygments.lexers.agile', 'Python Traceback', ('pytb',), ('*.pytb',), ('text/x-python-traceback',)),
+ 'RConsoleLexer': ('pygments.lexers.math', 'RConsole', ('rconsole', 'rout'), ('*.Rout',), ()),
'RagelCLexer': ('pygments.lexers.parsers', 'Ragel in C Host', ('ragel-c',), ('*.rl',), ()),
'RagelCppLexer': ('pygments.lexers.parsers', 'Ragel in CPP Host', ('ragel-cpp',), ('*.rl',), ()),
'RagelDLexer': ('pygments.lexers.parsers', 'Ragel in D Host', ('ragel-d',), ('*.rl',), ()),
@@ -157,6 +167,7 @@ LEXERS = {
'RubyConsoleLexer': ('pygments.lexers.agile', 'Ruby irb session', ('rbcon', 'irb'), (), ('text/x-ruby-shellsession',)),
'RubyLexer': ('pygments.lexers.agile', 'Ruby', ('rb', 'ruby'), ('*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx'), ('text/x-ruby', 'application/x-ruby')),
'SLexer': ('pygments.lexers.math', 'S', ('splus', 's', 'r'), ('*.S', '*.R'), ('text/S-plus', 'text/S', 'text/R')),
+ 'SassLexer': ('pygments.lexers.web', 'Sass', ('sass', 'SASS'), ('*.sass',), ('text/x-sass',)),
'ScalaLexer': ('pygments.lexers.compiled', 'Scala', ('scala',), ('*.scala',), ('text/x-scala',)),
'SchemeLexer': ('pygments.lexers.functional', 'Scheme', ('scheme', 'scm'), ('*.scm',), ('text/x-scheme', 'application/x-scheme')),
'SmalltalkLexer': ('pygments.lexers.other', 'Smalltalk', ('smalltalk', 'squeak'), ('*.st',), ('text/x-smalltalk',)),
diff --git a/pygments/lexers/agile.py b/pygments/lexers/agile.py
index dafed5c..bfaf0a6 100644
--- a/pygments/lexers/agile.py
+++ b/pygments/lexers/agile.py
@@ -10,10 +10,6 @@
"""
import re
-try:
- set
-except NameError:
- from sets import Set as set
from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \
LexerContext, include, combined, do_insertions, bygroups, using
@@ -41,7 +37,7 @@ class PythonLexer(RegexLexer):
name = 'Python'
aliases = ['python', 'py']
- filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript']
+ filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript', '*.tac']
mimetypes = ['text/x-python', 'application/x-python']
tokens = {
@@ -659,7 +655,7 @@ class RubyLexer(ExtendedRegexLexer):
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
- r')(\s*)(/)(?!=)', bygroups(Text, String.Regex), 'multiline-regex'),
+ r')(\s*)(/)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls)
(r'(?<=\(|,)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
@@ -835,7 +831,6 @@ class PerlLexer(RegexLexer):
(r'@(\\\\|\\\@|[^\@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\\%|[^\%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\\$|[^\$])*\$[egimosx]*', String.Regex, '#pop'),
- (r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\#.*?$', Comment.Single),
@@ -859,6 +854,7 @@ class PerlLexer(RegexLexer):
(r's\((\\\\|\\\)|[^\)])*\)\s*', String.Regex, 'balanced-regex'),
(r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
+ (r'm(?=[/!\\{<\[\(@%\$])', String.Regex, 'balanced-regex'),
(r'((?<==~)|(?<=\())\s*/(\\\\|\\/|[^/])*/[gcimosx]*', String.Regex),
(r'\s+', Text),
(r'(abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|'
@@ -906,7 +902,7 @@ class PerlLexer(RegexLexer):
(r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
(r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
(r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
- (r'(q|qq|qw|qr|qx)(.)[.\n]*?\1', String.Other),
+ (r'(q|qq|qw|qr|qx)([^a-zA-Z0-9])(.|\n)*?\2', String.Other),
(r'package\s+', Keyword, 'modulename'),
(r'sub\s+', Keyword, 'funcname'),
(r'(\[\]|\*\*|::|<<|>>|>=|<=|<=>|={3}|!=|=~|'
@@ -970,7 +966,7 @@ class PerlLexer(RegexLexer):
(r'\\', String.Other),
(r'\<', String.Other, 'lt-string'),
(r'\>', String.Other, '#pop'),
- (r'[^\<\>]]+', String.Other)
+ (r'[^\<\>]+', String.Other)
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
@@ -1015,6 +1011,11 @@ class LuaLexer(RegexLexer):
tokens = {
'root': [
+ # lua allows a file to start with a shebang
+ (r'#!(.*?)$', Comment.Preproc),
+ (r'', Text, 'base'),
+ ],
+ 'base': [
(r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline),
('--.*$', Comment.Single),
@@ -1263,6 +1264,7 @@ class TclLexer(RegexLexer):
include('command'),
include('basic'),
include('data'),
+ (r'}', Keyword), # HACK: somehow we miscounted our braces
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
@@ -1441,7 +1443,7 @@ class ClojureLexer(RegexLexer):
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
- (r"\\([()/'\".'_!§$%& ?;=+-]{1}|[a-zA-Z0-9]+)", String.Char),
+ (r"\\([()/'\".'_!§$%& ?;=#+-]{1}|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
diff --git a/pygments/lexers/asm.py b/pygments/lexers/asm.py
index e574a59..4740569 100644
--- a/pygments/lexers/asm.py
+++ b/pygments/lexers/asm.py
@@ -10,10 +10,6 @@
"""
import re
-try:
- set
-except NameError:
- from sets import Set as set
from pygments.lexer import RegexLexer, include, bygroups, using, DelegatingLexer
from pygments.lexers.compiled import DLexer, CppLexer, CLexer
diff --git a/pygments/lexers/compiled.py b/pygments/lexers/compiled.py
index e382a1b..a2543e2 100644
--- a/pygments/lexers/compiled.py
+++ b/pygments/lexers/compiled.py
@@ -10,10 +10,6 @@
"""
import re
-try:
- set
-except NameError:
- from sets import Set as set
from pygments.scanner import Scanner
from pygments.lexer import Lexer, RegexLexer, include, bygroups, using, \
@@ -29,7 +25,8 @@ from pygments.lexers.functional import OcamlLexer
__all__ = ['CLexer', 'CppLexer', 'DLexer', 'DelphiLexer', 'JavaLexer',
'ScalaLexer', 'DylanLexer', 'OcamlLexer', 'ObjectiveCLexer',
'FortranLexer', 'GLShaderLexer', 'PrologLexer', 'CythonLexer',
- 'ValaLexer', 'OocLexer', 'GoLexer']
+ 'ValaLexer', 'OocLexer', 'GoLexer', 'FelixLexer', 'AdaLexer',
+ 'Modula2Lexer']
class CLexer(RegexLexer):
@@ -63,6 +60,7 @@ class CLexer(RegexLexer):
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
+ (r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.]', Punctuation),
(r'\b(case)(.+?)(:)', bygroups(Keyword, using(this), Text)),
@@ -187,6 +185,7 @@ class CppLexer(RegexLexer):
(r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
(r'0[0-7]+[Ll]?', Number.Oct),
(r'\d+[Ll]?', Number.Integer),
+ (r'\*/', Error),
(r'[~!%^&*+=|?:<>/-]', Operator),
(r'[()\[\],.;]', Punctuation),
(r'(asm|auto|break|case|catch|const|const_cast|continue|'
@@ -929,6 +928,7 @@ class JavaLexer(RegexLexer):
],
}
+
class ScalaLexer(RegexLexer):
"""
For `Scala <http://www.scala-lang.org>`_ source code.
@@ -1122,14 +1122,14 @@ class ObjectiveCLexer(RegexLexer):
(r'__(asm|int8|based|except|int16|stdcall|cdecl|fastcall|int32|'
r'declspec|finally|int64|try|leave)\b', Keyword.Reserved),
(r'(TRUE|FALSE|nil|NULL)\b', Name.Builtin),
- ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
- ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
+ ('[a-zA-Z$_][a-zA-Z0-9$_]*:(?!:)', Name.Label),
+ ('[a-zA-Z$_][a-zA-Z0-9$_]*', Name),
],
'root': [
include('whitespace'),
# functions
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
- r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
+ r'([a-zA-Z$_][a-zA-Z0-9$_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')({)',
bygroups(using(this), Name.Function,
@@ -1137,7 +1137,7 @@ class ObjectiveCLexer(RegexLexer):
'function'),
# function declarations
(r'((?:[a-zA-Z0-9_*\s])+?(?:\s|[*]))' # return arguments
- r'([a-zA-Z_][a-zA-Z0-9_]*)' # method name
+ r'([a-zA-Z$_][a-zA-Z0-9$_]*)' # method name
r'(\s*\([^;]*?\))' # signature
r'(' + _ws + r')(;)',
bygroups(using(this), Name.Function,
@@ -1151,18 +1151,18 @@ class ObjectiveCLexer(RegexLexer):
],
'classname' : [
# interface definition that inherits
- ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*:\s*)([a-zA-Z_][a-zA-Z0-9_]*)?',
+ ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*:\s*)([a-zA-Z$_][a-zA-Z0-9$_]*)?',
bygroups(Name.Class, Text, Name.Class), '#pop'),
# interface definition for a category
- ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*)(\([a-zA-Z_][a-zA-Z0-9_]*\))',
+ ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*)(\([a-zA-Z$_][a-zA-Z0-9$_]*\))',
bygroups(Name.Class, Text, Name.Label), '#pop'),
# simple interface / implementation
- ('([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop')
+ ('([a-zA-Z$_][a-zA-Z0-9$_]*)', Name.Class, '#pop')
],
'forward_classname' : [
- ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
+ ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*,\s*)',
bygroups(Name.Class, Text), 'forward_classname'),
- ('([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
+ ('([a-zA-Z$_][a-zA-Z0-9$_]*)(\s*;?)',
bygroups(Name.Class, Text), '#pop')
],
'statement' : [
@@ -1790,3 +1790,576 @@ class GoLexer(RegexLexer):
(r'[a-zA-Z_]\w*', Name),
]
}
+
+
+class FelixLexer(RegexLexer):
+ """
+ For `Felix <http://www.felix-lang.org>`_ source code.
+
+ *New in Pygments 1.2.*
+ """
+
+ name = 'Felix'
+ aliases = ['felix', 'flx']
+ filenames = ['*.flx', '*.flxh']
+ mimetypes = ['text/x-felix']
+
+ preproc = [
+ 'elif', 'else', 'endif', 'if', 'ifdef', 'ifndef',
+ ]
+
+ keywords = [
+ '_', '_deref', 'all', 'as',
+ 'assert', 'attempt', 'call', 'callback', 'case', 'caseno', 'cclass',
+ 'code', 'compound', 'ctypes', 'do', 'done', 'downto', 'elif', 'else',
+ 'endattempt', 'endcase', 'endif', 'endmatch', 'enum', 'except',
+ 'exceptions', 'expect', 'finally', 'for', 'forall', 'forget', 'fork',
+ 'functor', 'goto', 'ident', 'if', 'incomplete', 'inherit', 'instance',
+ 'interface', 'jump', 'lambda', 'loop', 'match', 'module', 'namespace',
+ 'new', 'noexpand', 'nonterm', 'obj', 'of', 'open', 'parse', 'raise',
+ 'regexp', 'reglex', 'regmatch', 'rename', 'return', 'the', 'then',
+ 'to', 'type', 'typecase', 'typedef', 'typematch', 'typeof', 'upto',
+ 'when', 'whilst', 'with', 'yield',
+ ]
+
+ keyword_directives = [
+ '_gc_pointer', '_gc_type', 'body', 'comment', 'const', 'export',
+ 'header', 'inline', 'lval', 'macro', 'noinline', 'noreturn',
+ 'package', 'private', 'pod', 'property', 'public', 'publish',
+ 'requires', 'todo', 'virtual', 'use',
+ ]
+
+ keyword_declarations = [
+ 'def', 'let', 'ref', 'val', 'var',
+ ]
+
+ keyword_types = [
+ 'unit', 'void', 'any', 'bool',
+ 'byte', 'offset',
+ 'address', 'caddress', 'cvaddress', 'vaddress',
+ 'tiny', 'short', 'int', 'long', 'vlong',
+ 'utiny', 'ushort', 'vshort', 'uint', 'ulong', 'uvlong',
+ 'int8', 'int16', 'int32', 'int64',
+ 'uint8', 'uint16', 'uint32', 'uint64',
+ 'float', 'double', 'ldouble',
+ 'complex', 'dcomplex', 'lcomplex',
+ 'imaginary', 'dimaginary', 'limaginary',
+ 'char', 'wchar', 'uchar',
+ 'charp', 'charcp', 'ucharp', 'ucharcp',
+ 'string', 'wstring', 'ustring',
+ 'cont',
+ 'array', 'varray', 'list',
+ 'lvalue', 'opt', 'slice',
+ ]
+
+ keyword_constants = [
+ 'false', 'true',
+ ]
+
+ operator_words = [
+ 'and', 'not', 'in', 'is', 'isin', 'or', 'xor',
+ ]
+
+ name_builtins = [
+ '_svc', 'while',
+ ]
+
+ name_pseudo = [
+ 'root', 'self', 'this',
+ ]
+
+ decimal_suffixes = '([tTsSiIlLvV]|ll|LL|([iIuU])(8|16|32|64))?'
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+
+ # Keywords
+ (r'(axiom|ctor|fun|gen|proc|reduce|union)\b', Keyword,
+ 'funcname'),
+ (r'(class|cclass|cstruct|obj|struct)\b', Keyword, 'classname'),
+ (r'(instance|module|typeclass)\b', Keyword, 'modulename'),
+
+ (r'(%s)\b' % '|'.join(keywords), Keyword),
+ (r'(%s)\b' % '|'.join(keyword_directives), Name.Decorator),
+ (r'(%s)\b' % '|'.join(keyword_declarations), Keyword.Declaration),
+ (r'(%s)\b' % '|'.join(keyword_types), Keyword.Type),
+ (r'(%s)\b' % '|'.join(keyword_constants), Keyword.Constant),
+
+ # Operators
+ include('operators'),
+
+ # Float Literal
+ # -- Hex Float
+ (r'0[xX]([0-9a-fA-F_]*\.[0-9a-fA-F_]+|[0-9a-fA-F_]+)'
+ r'[pP][+\-]?[0-9_]+[lLfFdD]?', Number.Float),
+ # -- DecimalFloat
+ (r'[0-9_]+(\.[0-9_]+[eE][+\-]?[0-9_]+|'
+ r'\.[0-9_]*|[eE][+\-]?[0-9_]+)[lLfFdD]?', Number.Float),
+ (r'\.(0|[1-9][0-9_]*)([eE][+\-]?[0-9_]+)?[lLfFdD]?',
+ Number.Float),
+
+ # IntegerLiteral
+ # -- Binary
+ (r'0[Bb][01_]+%s' % decimal_suffixes, Number),
+ # -- Octal
+ (r'0[0-7_]+%s' % decimal_suffixes, Number.Oct),
+ # -- Hexadecimal
+ (r'0[xX][0-9a-fA-F_]+%s' % decimal_suffixes, Number.Hex),
+ # -- Decimal
+ (r'(0|[1-9][0-9_]*)%s' % decimal_suffixes, Number.Integer),
+
+ # Strings
+ ('([rR][cC]?|[cC][rR])"""', String, 'tdqs'),
+ ("([rR][cC]?|[cC][rR])'''", String, 'tsqs'),
+ ('([rR][cC]?|[cC][rR])"', String, 'dqs'),
+ ("([rR][cC]?|[cC][rR])'", String, 'sqs'),
+ ('[cCfFqQwWuU]?"""', String, combined('stringescape', 'tdqs')),
+ ("[cCfFqQwWuU]?'''", String, combined('stringescape', 'tsqs')),
+ ('[cCfFqQwWuU]?"', String, combined('stringescape', 'dqs')),
+ ("[cCfFqQwWuU]?'", String, combined('stringescape', 'sqs')),
+
+ # Punctuation
+ (r'[\[\]{}:(),;?]', Punctuation),
+
+ # Labels
+ (r'[a-zA-Z_]\w*:>', Name.Label),
+
+ # Identifiers
+ (r'(%s)\b' % '|'.join(name_builtins), Name.Builtin),
+ (r'(%s)\b' % '|'.join(name_pseudo), Name.Builtin.Pseudo),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'whitespace': [
+ (r'\n', Text),
+ (r'\s+', Text),
+
+ include('comment'),
+
+ # Preprocessor
+ (r'#\s*if\s+0', Comment.Preproc, 'if0'),
+ (r'#', Comment.Preproc, 'macro'),
+ ],
+ 'operators': [
+ (r'(%s)\b' % '|'.join(operator_words), Operator.Word),
+ (r'!=|==|<<|>>|\|\||&&|[-~+/*%=<>&^|.$]', Operator),
+ ],
+ 'comment': [
+ (r'//(.*?)\n', Comment.Single),
+ (r'/[*]', Comment.Multiline, 'comment2'),
+ ],
+ 'comment2': [
+ (r'[^\/*]', Comment.Multiline),
+ (r'/[*]', Comment.Multiline, '#push'),
+ (r'[*]/', Comment.Multiline, '#pop'),
+ (r'[\/*]', Comment.Multiline),
+ ],
+ 'if0': [
+ (r'^\s*#if.*?(?<!\\)\n', Comment, '#push'),
+ (r'^\s*#endif.*?(?<!\\)\n', Comment, '#pop'),
+ (r'.*?\n', Comment),
+ ],
+ 'macro': [
+ include('comment'),
+ (r'(import|include)(\s+)(<[^>]*?>)',
+ bygroups(Comment.Preproc, Text, String), '#pop'),
+ (r'(import|include)(\s+)("[^"]*?")',
+ bygroups(Comment.Preproc, Text, String), '#pop'),
+ (r"(import|include)(\s+)('[^']*?')",
+ bygroups(Comment.Preproc, Text, String), '#pop'),
+ (r'[^/\n]+', Comment.Preproc),
+ ##(r'/[*](.|\n)*?[*]/', Comment),
+ ##(r'//.*?\n', Comment, '#pop'),
+ (r'/', Comment.Preproc),
+ (r'(?<=\\)\n', Comment.Preproc),
+ (r'\n', Comment.Preproc, '#pop'),
+ ],
+ 'funcname': [
+ include('whitespace'),
+ (r'[a-zA-Z_]\w*', Name.Function, '#pop'),
+ # anonymous functions
+ (r'(?=\()', Text, '#pop'),
+ ],
+ 'classname': [
+ include('whitespace'),
+ (r'[a-zA-Z_]\w*', Name.Class, '#pop'),
+ # anonymous classes
+ (r'(?=\{)', Text, '#pop'),
+ ],
+ 'modulename': [
+ include('whitespace'),
+ (r'\[', Punctuation, ('modulename2', 'tvarlist')),
+ (r'', Error, 'modulename2'),
+ ],
+ 'modulename2': [
+ include('whitespace'),
+ (r'([a-zA-Z_]\w*)', Name.Namespace, '#pop:2'),
+ ],
+ 'tvarlist': [
+ include('whitespace'),
+ include('operators'),
+ (r'\[', Punctuation, '#push'),
+ (r'\]', Punctuation, '#pop'),
+ (r',', Punctuation),
+ (r'(with|where)\b', Keyword),
+ (r'[a-zA-Z_]\w*', Name),
+ ],
+ 'stringescape': [
+ (r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
+ r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
+ ],
+ 'strings': [
+ (r'%(\([a-zA-Z0-9]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
+ '[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
+ (r'[^\\\'"%\n]+', String),
+ # quotes, percents and backslashes must be parsed one at a time
+ (r'[\'"\\]', String),
+ # unhandled string formatting sign
+ (r'%', String)
+ # newlines are an error (use "nl" state)
+ ],
+ 'nl': [
+ (r'\n', String)
+ ],
+ 'dqs': [
+ (r'"', String, '#pop'),
+ # included here again for raw strings
+ (r'\\\\|\\"|\\\n', String.Escape),
+ include('strings')
+ ],
+ 'sqs': [
+ (r"'", String, '#pop'),
+ # included here again for raw strings
+ (r"\\\\|\\'|\\\n", String.Escape),
+ include('strings')
+ ],
+ 'tdqs': [
+ (r'"""', String, '#pop'),
+ include('strings'),
+ include('nl')
+ ],
+ 'tsqs': [
+ (r"'''", String, '#pop'),
+ include('strings'),
+ include('nl')
+ ],
+ }
+
+
+class AdaLexer(RegexLexer):
+ """
+ For Ada source code.
+
+ *New in Pygments 1.3.*
+ """
+
+ name = 'Ada'
+    aliases = ['ada', 'ada95', 'ada2005']
+ filenames = ['*.adb', '*.ads', '*.ada']
+ mimetypes = ['text/x-ada']
+
+ flags = re.MULTILINE | re.I # Ignore case
+
+ _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
+
+ tokens = {
+ 'root': [
+ (r'[^\S\n]+', Text),
+ (r'--.*?\n', Comment.Single),
+ (r'[^\S\n]+', Text),
+ (r'function|procedure|entry', Keyword.Declaration, 'subprogram'),
+ (r'(subtype|type)(\s+)([a-z0-9_]+)',
+ bygroups(Keyword.Declaration, Text, Keyword.Type), 'type_def'),
+ (r'task|protected', Keyword.Declaration),
+ (r'(subtype)(\s+)', bygroups(Keyword.Declaration, Text)),
+ (r'(end)(\s+)', bygroups(Keyword.Reserved, Text), 'end'),
+ (r'(pragma)(\s+)([a-zA-Z0-9_]+)', bygroups(Keyword.Reserved, Text,
+ Comment.Preproc)),
+ (r'(true|false|null)\b', Keyword.Constant),
+ (r'(Byte|Character|Float|Integer|Long_Float|Long_Integer|'
+ r'Long_Long_Float|Long_Long_Integer|Natural|Positive|Short_Float|'
+ r'Short_Integer|Short_Short_Float|Short_Short_Integer|String|'
+ r'Wide_String|Duration)\b', Keyword.Type),
+            (r'(and(\s+then)?|in|mod|not|or(\s+else)?|rem)\b', Operator.Word),
+ (r'generic|private', Keyword.Declaration),
+ (r'package', Keyword.Declaration, 'package'),
+ (r'array\b', Keyword.Reserved, 'array_def'),
+ (r'(with|use)(\s+)', bygroups(Keyword.Namespace, Text), 'import'),
+ (r'([a-z0-9_]+)(\s*)(:)(\s*)(constant)',
+ bygroups(Name.Constant, Text, Punctuation, Text,
+ Keyword.Reserved)),
+ (r'<<[a-z0-9_]+>>', Name.Label),
+ (r'([a-z0-9_]+)(\s*)(:)(\s*)(declare|begin|loop|for|while)',
+ bygroups(Name.Label, Text, Punctuation, Text, Keyword.Reserved)),
+ (r'\b(abort|abs|abstract|accept|access|aliased|all|array|at|begin|'
+ r'body|case|constant|declare|delay|delta|digits|do|else|elsif|end|'
+ r'entry|exception|exit|interface|for|goto|if|is|limited|loop|new|'
+ r'null|of|or|others|out|overriding|pragma|protected|raise|range|'
+ r'record|renames|requeue|return|reverse|select|separate|subtype|'
+ r'synchronized|task|tagged|terminate|then|type|until|when|while|'
+ r'xor)\b',
+ Keyword.Reserved),
+ (r'"[^"]*"', String),
+ include('attribute'),
+ include('numbers'),
+ (r"'[^']'", String.Character),
+ (r'([a-z0-9_]+)(\s*|[(,])', bygroups(Name, using(this))),
+ (r"(<>|=>|:=|[\(\)\|:;,.'])", Punctuation),
+ (r'[*<>+=/&-]', Operator),
+ (r'\n+', Text),
+ ],
+ 'numbers' : [
+ (r'[0-9_]+#[0-9a-f]+#', Number.Hex),
+ (r'[0-9_]+\.[0-9_]*', Number.Float),
+ (r'[0-9_]+', Number.Integer),
+ ],
+ 'attribute' : [
+ (r"(')([a-zA-Z0-9_]+)", bygroups(Punctuation, Name.Attribute)),
+ ],
+ 'subprogram' : [
+ (r'\(', Punctuation, ('#pop', 'formal_part')),
+ (r';', Punctuation, '#pop'),
+ (r'is\b', Keyword.Reserved, '#pop'),
+ (r'"[^"]+"|[a-z0-9_]+', Name.Function),
+ include('root'),
+ ],
+ 'end' : [
+ ('(if|case|record|loop|select)', Keyword.Reserved),
+ ('"[^"]+"|[a-zA-Z0-9_]+', Name.Function),
+ ('[\n\s]+', Text),
+ (';', Punctuation, '#pop'),
+ ],
+ 'type_def': [
+ (r';', Punctuation, '#pop'),
+ (r'\(', Punctuation, 'formal_part'),
+ (r'with|and|use', Keyword.Reserved),
+ (r'array\b', Keyword.Reserved, ('#pop', 'array_def')),
+ (r'record\b', Keyword.Reserved, ('formal_part')),
+ include('root'),
+ ],
+ 'array_def' : [
+ (r';', Punctuation, '#pop'),
+ (r'([a-z0-9_]+)(\s+)(range)', bygroups(Keyword.Type, Text,
+ Keyword.Reserved)),
+ include('root'),
+ ],
+ 'import': [
+ (r'[a-z0-9_.]+', Name.Namespace, '#pop'),
+ ],
+ 'formal_part' : [
+ (r'\)', Punctuation, '#pop'),
+ (r'([a-z0-9_]+)(\s*)(,|:[^=])', bygroups(Name.Variable,
+ Text, Punctuation)),
+ (r'(in|not|null|out|access)\b', Keyword.Reserved),
+ include('root'),
+ ],
+ 'package': [
+ ('body', Keyword.Declaration),
+ ('is\s+new|renames', Keyword.Reserved),
+ ('is', Keyword.Reserved, '#pop'),
+ (';', Punctuation, '#pop'),
+ ('\(', Punctuation, 'package_instantiation'),
+ ('([a-zA-Z0-9_.]+)', Name.Class),
+ include('root'),
+ ],
+ 'package_instantiation': [
+ (r'("[^"]+"|[a-z0-9_]+)(\s+)(=>)', bygroups(Name.Variable,
+ Text, Punctuation)),
+ (r'[a-z0-9._\'"]', Text),
+ (r'\)', Punctuation, '#pop'),
+ include('root'),
+ ],
+ }
+
+
+class Modula2Lexer(RegexLexer):
+ """
+ For `Modula-2 <http://www.modula2.org/>`_ source code.
+
+ Additional options that determine which keywords are highlighted:
+
+ `pim`
+ Select PIM Modula-2 dialect (default: True).
+ `iso`
+ Select ISO Modula-2 dialect (default: False).
+ `objm2`
+ Select Objective Modula-2 dialect (default: False).
+ `gm2ext`
+ Also highlight GNU extensions (default: False).
+
+ *New in Pygments 1.3.*
+ """
+ name = 'Modula-2'
+ aliases = ['modula2', 'm2']
+ filenames = ['*.def', '*.mod']
+ mimetypes = ['text/x-modula2']
+
+ flags = re.MULTILINE | re.DOTALL
+
+ tokens = {
+ 'whitespace': [
+ (r'\n+', Text), # blank lines
+ (r'\s+', Text), # whitespace
+ ],
+ 'identifiers': [
+ (r'([a-zA-Z_\$][a-zA-Z0-9_\$]*)', Name),
+ ],
+ 'numliterals': [
+ (r'[01]+B', Number.Binary), # binary number (ObjM2)
+ (r'[0-7]+B', Number.Oct), # octal number (PIM + ISO)
+ (r'[0-7]+C', Number.Oct), # char code (PIM + ISO)
+ (r'[0-9A-F]+C', Number.Hex), # char code (ObjM2)
+ (r'[0-9A-F]+H', Number.Hex), # hexadecimal number
+            (r'[0-9]+\.[0-9]+E[+-]?[0-9]+', Number.Float), # real number
+ (r'[0-9]+\.[0-9]+', Number.Float), # real number
+ (r'[0-9]+', Number.Integer), # decimal whole number
+ ],
+ 'strings': [
+ (r"'(\\\\|\\'|[^'])*'", String), # single quoted string
+ (r'"(\\\\|\\"|[^"])*"', String), # double quoted string
+ ],
+ 'operators': [
+ (r'[*/+=#~&<>\^-]', Operator),
+ (r':=', Operator), # assignment
+ (r'@', Operator), # pointer deref (ISO)
+ (r'\.\.', Operator), # ellipsis or range
+ (r'`', Operator), # Smalltalk message (ObjM2)
+ (r'::', Operator), # type conversion (ObjM2)
+ ],
+ 'punctuation': [
+ (r'[\(\)\[\]{},.:;|]', Punctuation),
+ ],
+ 'comments': [
+ (r'//.*?\n', Comment.Single), # ObjM2
+ (r'/\*(.*?)\*/', Comment.Multiline), # ObjM2
+ (r'\(\*([^\$].*?)\*\)', Comment.Multiline),
+ # TO DO: nesting of (* ... *) comments
+ ],
+ 'pragmas': [
+ (r'\(\*\$(.*?)\*\)', Comment.Preproc), # PIM
+ (r'<\*(.*?)\*>', Comment.Preproc), # ISO + ObjM2
+ ],
+ 'root': [
+ include('whitespace'),
+ include('comments'),
+ include('pragmas'),
+ include('identifiers'),
+ include('numliterals'),
+ include('strings'),
+ include('operators'),
+ include('punctuation'),
+ ]
+ }
+
+ pim_reserved_words = [
+ # 40 reserved words
+ 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION',
+ 'DIV', 'DO', 'ELSE', 'ELSIF', 'END', 'EXIT', 'EXPORT', 'FOR',
+ 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN', 'LOOP', 'MOD',
+ 'MODULE', 'NOT', 'OF', 'OR', 'POINTER', 'PROCEDURE', 'QUALIFIED',
+ 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
+ 'UNTIL', 'VAR', 'WHILE', 'WITH',
+ ]
+
+ pim_pervasives = [
+ # 31 pervasives
+ 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'DEC',
+ 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH', 'INC', 'INCL',
+ 'INTEGER', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW', 'NIL', 'ODD',
+ 'ORD', 'PROC', 'REAL', 'SIZE', 'TRUE', 'TRUNC', 'VAL',
+ ]
+
+ iso_reserved_words = [
+ # 46 reserved words
+ 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
+ 'DO', 'ELSE', 'ELSIF', 'END', 'EXCEPT', 'EXIT', 'EXPORT', 'FINALLY',
+ 'FOR', 'FORWARD', 'FROM', 'IF', 'IMPLEMENTATION', 'IMPORT', 'IN',
+ 'LOOP', 'MOD', 'MODULE', 'NOT', 'OF', 'OR', 'PACKEDSET', 'POINTER',
+ 'PROCEDURE', 'QUALIFIED', 'RECORD', 'REPEAT', 'REM', 'RETRY',
+ 'RETURN', 'SET', 'THEN', 'TO', 'TYPE', 'UNTIL', 'VAR', 'WHILE',
+ 'WITH',
+ ]
+
+ iso_pervasives = [
+ # 42 pervasives
+ 'ABS', 'BITSET', 'BOOLEAN', 'CAP', 'CARDINAL', 'CHAR', 'CHR', 'CMPLX',
+ 'COMPLEX', 'DEC', 'DISPOSE', 'EXCL', 'FALSE', 'FLOAT', 'HALT', 'HIGH',
+ 'IM', 'INC', 'INCL', 'INT', 'INTEGER', 'INTERRUPTIBLE', 'LENGTH',
+ 'LFLOAT', 'LONGCOMPLEX', 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEW',
+ 'NIL', 'ODD', 'ORD', 'PROC', 'PROTECTION', 'RE', 'REAL', 'SIZE',
+        'TRUE', 'TRUNC', 'UNINTERRUPTIBLE', 'VAL',
+ ]
+
+ objm2_reserved_words = [
+ # base language, 42 reserved words
+ 'AND', 'ARRAY', 'BEGIN', 'BY', 'CASE', 'CONST', 'DEFINITION', 'DIV',
+ 'DO', 'ELSE', 'ELSIF', 'END', 'ENUM', 'EXIT', 'FOR', 'FROM', 'IF',
+ 'IMMUTABLE', 'IMPLEMENTATION', 'IMPORT', 'IN', 'IS', 'LOOP', 'MOD',
+ 'MODULE', 'NOT', 'OF', 'OPAQUE', 'OR', 'POINTER', 'PROCEDURE',
+ 'RECORD', 'REPEAT', 'RETURN', 'SET', 'THEN', 'TO', 'TYPE',
+ 'UNTIL', 'VAR', 'VARIADIC', 'WHILE',
+ # OO extensions, 16 reserved words
+ 'BYCOPY', 'BYREF', 'CLASS', 'CONTINUE', 'CRITICAL', 'INOUT', 'METHOD',
+ 'ON', 'OPTIONAL', 'OUT', 'PRIVATE', 'PROTECTED', 'PROTOCOL', 'PUBLIC',
+ 'SUPER', 'TRY',
+ ]
+
+ objm2_pervasives = [
+ # base language, 38 pervasives
+ 'ABS', 'BITSET', 'BOOLEAN', 'CARDINAL', 'CHAR', 'CHR', 'DISPOSE',
+ 'FALSE', 'HALT', 'HIGH', 'INTEGER', 'INRANGE', 'LENGTH', 'LONGCARD',
+ 'LONGINT', 'LONGREAL', 'MAX', 'MIN', 'NEG', 'NEW', 'NEXTV', 'NIL',
+ 'OCTET', 'ODD', 'ORD', 'PRED', 'PROC', 'READ', 'REAL', 'SUCC', 'TMAX',
+ 'TMIN', 'TRUE', 'TSIZE', 'UNICHAR', 'VAL', 'WRITE', 'WRITEF',
+ # OO extensions, 3 pervasives
+ 'OBJECT', 'NO', 'YES',
+ ]
+
+ gnu_reserved_words = [
+ # 10 additional reserved words
+ 'ASM', '__ATTRIBUTE__', '__BUILTIN__', '__COLUMN__', '__DATE__',
+ '__FILE__', '__FUNCTION__', '__LINE__', '__MODULE__', 'VOLATILE',
+ ]
+
+ gnu_pervasives = [
+ # 21 identifiers, actually from pseudo-module SYSTEM
+ # but we will highlight them as if they were pervasives
+ 'BITSET8', 'BITSET16', 'BITSET32', 'CARDINAL8', 'CARDINAL16',
+ 'CARDINAL32', 'CARDINAL64', 'COMPLEX32', 'COMPLEX64', 'COMPLEX96',
+ 'COMPLEX128', 'INTEGER8', 'INTEGER16', 'INTEGER32', 'INTEGER64',
+ 'REAL8', 'REAL16', 'REAL32', 'REAL96', 'REAL128', 'THROW',
+ ]
+
+ def __init__(self, **options):
+ self.reserved_words = set()
+ self.pervasives = set()
+ # ISO Modula-2
+ if get_bool_opt(options, 'iso', False):
+ self.reserved_words.update(self.iso_reserved_words)
+ self.pervasives.update(self.iso_pervasives)
+ # Objective Modula-2
+ elif get_bool_opt(options, 'objm2', False):
+ self.reserved_words.update(self.objm2_reserved_words)
+ self.pervasives.update(self.objm2_pervasives)
+ # PIM Modula-2 (DEFAULT)
+ else:
+ self.reserved_words.update(self.pim_reserved_words)
+ self.pervasives.update(self.pim_pervasives)
+ # GNU extensions
+ if get_bool_opt(options, 'gm2ext', False):
+ self.reserved_words.update(self.gnu_reserved_words)
+ self.pervasives.update(self.gnu_pervasives)
+ # initialise
+ RegexLexer.__init__(self, **options)
+
+ def get_tokens_unprocessed(self, text):
+ for index, token, value in \
+ RegexLexer.get_tokens_unprocessed(self, text):
+ # check for reserved words and pervasives
+ if token is Name:
+ if value in self.reserved_words:
+ token = Keyword.Reserved
+ elif value in self.pervasives:
+ token = Keyword.Pervasive
+ # return result
+ yield index, token, value
diff --git a/pygments/lexers/functional.py b/pygments/lexers/functional.py
index 8431ba3..ffbd753 100644
--- a/pygments/lexers/functional.py
+++ b/pygments/lexers/functional.py
@@ -10,10 +10,6 @@
"""
import re
-try:
- set
-except NameError:
- from sets import Set as set
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Text, Comment, Operator, Keyword, Name, \
@@ -474,7 +470,7 @@ class LiterateHaskellLexer(Lexer):
style = self.options.get('litstyle')
if style is None:
- style = (text.lstrip()[0] in '%\\') and 'latex' or 'bird'
+ style = (text.lstrip()[0:1] in '%\\') and 'latex' or 'bird'
code = ''
insertions = []
diff --git a/pygments/lexers/math.py b/pygments/lexers/math.py
index 79097f2..448e299 100644
--- a/pygments/lexers/math.py
+++ b/pygments/lexers/math.py
@@ -10,10 +10,6 @@
"""
import re
-try:
- set
-except NameError:
- from sets import Set as set
from pygments.lexer import Lexer, RegexLexer, bygroups, include, do_insertions
from pygments.token import Comment, String, Punctuation, Keyword, Name, \
@@ -22,7 +18,7 @@ from pygments.token import Comment, String, Punctuation, Keyword, Name, \
from pygments.lexers.agile import PythonLexer
__all__ = ['MuPADLexer', 'MatlabLexer', 'MatlabSessionLexer', 'NumPyLexer',
- 'SLexer']
+ 'RConsoleLexer', 'SLexer']
class MuPADLexer(RegexLexer):
@@ -340,6 +336,52 @@ class NumPyLexer(PythonLexer):
yield index, token, value
+class RConsoleLexer(Lexer):
+ """
+ For R console transcripts or R CMD BATCH output files.
+ """
+
+ name = 'RConsole'
+ aliases = ['rconsole', 'rout']
+ filenames = ['*.Rout']
+
+ def get_tokens_unprocessed(self, text):
+ slexer = SLexer(**self.options)
+
+ current_code_block = ''
+ insertions = []
+
+ for match in line_re.finditer(text):
+ line = match.group()
+ if line.startswith('>') or line.startswith('+'):
+ # Colorize the prompt as such,
+ # then put rest of line into current_code_block
+ insertions.append((len(current_code_block),
+ [(0, Generic.Prompt, line[:2])]))
+ current_code_block += line[2:]
+ else:
+ # We have reached a non-prompt line!
+ # If we have stored prompt lines, need to process them first.
+ if current_code_block:
+ # Weave together the prompts and highlight code.
+ for item in do_insertions(insertions,
+ slexer.get_tokens_unprocessed(current_code_block)):
+ yield item
+ # Reset vars for next code block.
+ current_code_block = ''
+ insertions = []
+ # Now process the actual line itself, this is output from R.
+ yield match.start(), Generic.Output, line
+
+ # If we happen to end on a code block with nothing after it, need to
+ # process the last code block. This is neither elegant nor DRY so
+ # should be changed.
+ if current_code_block:
+ for item in do_insertions(insertions,
+ slexer.get_tokens_unprocessed(current_code_block)):
+ yield item
+
+
class SLexer(RegexLexer):
"""
For S, S-plus, and R source code.
diff --git a/pygments/lexers/other.py b/pygments/lexers/other.py
index 7902435..8ca00fe 100644
--- a/pygments/lexers/other.py
+++ b/pygments/lexers/other.py
@@ -321,14 +321,14 @@ class BefungeLexer(RegexLexer):
class BashLexer(RegexLexer):
"""
- Lexer for (ba)sh shell scripts.
+ Lexer for (ba|k|)sh shell scripts.
*New in Pygments 0.6.*
"""
name = 'Bash'
- aliases = ['bash', 'sh']
- filenames = ['*.sh', '*.ebuild', '*.eclass']
+ aliases = ['bash', 'sh', 'ksh']
+ filenames = ['*.sh', '*.ksh', '*.bash', '*.ebuild', '*.eclass']
mimetypes = ['application/x-sh', 'application/x-shellscript']
tokens = {
@@ -633,11 +633,11 @@ class SmalltalkLexer(RegexLexer):
],
'_parenth_helper' : [
include('whitespaces'),
+ (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'[-+*/\\~<>=|&#!?,@%\w+:]+', String.Symbol),
# literals
(r'\'[^\']*\'', String),
(r'\$.', String.Char),
- (r'(\d+r)?-?\d+(\.\d+)?(e-?\d+)?', Number),
(r'#*\(', String.Symbol, 'inner_parenth'),
],
'parenth' : [
@@ -2107,45 +2107,45 @@ class GherkinLexer(RegexLexer):
feature_keywords_regexp = ur'^(기능|機能|功能|フィーチャ|خاصية|תכונה|Функционалност|Функционал|Особина|Могућност|Özellik|Właściwość|Tính năng|Savybė|Požiadavka|Požadavek|Osobina|Ominaisuus|Omadus|OH HAI|Mogućnost|Mogucnost|Jellemző|Fīča|Funzionalità|Funktionalität|Funkcionalnost|Funkcionalitāte|Funcționalitate|Functionaliteit|Functionalitate|Funcionalidade|Fonctionnalité|Fitur|Feature|Egenskap|Egenskab|Crikey|Característica|Arwedd)(:)(.*)$'
scenario_keywords_regexp = ur'^(\s*)(시나리오 개요|시나리오|배경|背景|場景大綱|場景|场景大纲|场景|劇本大綱|劇本|テンプレ|シナリオテンプレート|シナリオテンプレ|シナリオアウトライン|シナリオ|سيناريو مخطط|سيناريو|الخلفية|תרחיש|תבנית תרחיש|רקע|Тарих|Сценарио|Сценарий структураси|Сценарий|Структура сценарија|Структура сценария|Скица|Рамка на сценарий|Пример|Предыстория|Предистория|Позадина|Основа|Концепт|Контекст|Założenia|Tình huống|Tausta|Taust|Tapausaihio|Tapaus|Szenariogrundriss|Szenario|Szablon scenariusza|Stsenaarium|Struktura scenarija|Skica|Skenario konsep|Skenario|Situācija|Senaryo taslağı|Senaryo|Scénář|Scénario|Schema dello scenario|Scenārijs pēc parauga|Scenārijs|Scenár|Scenariusz|Scenariul de şablon|Scenariul de sablon|Scenariu|Scenario Outline|Scenario Amlinellol|Scenario|Scenarijus|Scenarijaus šablonas|Scenarij|Scenarie|Rerefons|Raamstsenaarium|Primer|Pozadí|Pozadina|Pozadie|Plan du scénario|Plan du Scénario|Osnova scénáře|Osnova|Náčrt Scénáře|Náčrt Scenáru|Mate|MISHUN SRSLY|MISHUN|Kịch bản|Kontext|Konteksts|Kontekstas|Kontekst|Koncept|Khung tình huống|Khung kịch bản|Háttér|Grundlage|Geçmiş|Forgatókönyv vázlat|Forgatókönyv|Esquema do Cenário|Esquema do Cenario|Esquema del escenario|Esquema de l\'escenari|Escenario|Escenari|Dasar|Contexto|Contexte|Contesto|Condiţii|Conditii|Cenário|Cenario|Cefndir|Bối cảnh|Blokes|Bakgrunn|Bakgrund|Baggrund|Background|B4|Antecedents|Antecedentes|All y\'all|Achtergrond|Abstrakt Scenario|Abstract Scenario)(:)(.*)$'
examples_regexp = ur'^(\s*)(예|例子|例|サンプル|امثلة|דוגמאות|Сценарији|Примери|Мисоллар|Значения|Örnekler|Voorbeelden|Variantai|Tapaukset|Scenarios|Scenariji|Scenarijai|Příklady|Példák|Príklady|Przykłady|Primjeri|Primeri|Piemēri|Pavyzdžiai|Paraugs|Juhtumid|Exemplos|Exemples|Exemplele|Exempel|Examples|Esempi|Enghreifftiau|Eksempler|Ejemplos|EXAMPLZ|Dữ liệu|Contoh|Cobber|Beispiele)(:)(.*)$'
- step_keywords_regexp = ur'^(\s*)(하지만|조건|만일|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Унда |То |Онда |Но |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Агар |А |Și |És |anrhegedig a |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Yna |Ya know how |Ya gotta |Y |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Soit |Siis |Si |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Donat |Donada |Diyelim ki |Dengan |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |But y\'all |But |Biết |Bet |BUT |Atunci |And y\'all |And |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A )'
+ step_keywords_regexp = ur'^(\s*)(하지만|조건|만일|그리고|그러면|那麼|那么|而且|當|当|前提|假設|假如|但是|但し|並且|もし|ならば|ただし|しかし|かつ|و |متى |لكن |عندما |ثم |بفرض |اذاً |כאשר |וגם |בהינתן |אזי |אז |אבל |Унда |То |Онда |Но |Лекин |Когато |Када |Кад |К тому же |И |Задато |Задати |Задате |Если |Допустим |Дадено |Ва |Бирок |Аммо |Али |Агар |А |Și |És |anrhegedig a |Zatati |Zakładając |Zadato |Zadate |Zadano |Zadani |Zadan |Yna |Ya know how |Ya gotta |Y |Wtedy |When y\'all |When |Wenn |WEN |Và |Ve |Und |Un |Thì |Then y\'all |Then |Tapi |Tak |Tada |Tad |Så |Stel |Soit |Siis |Si |Quando |Quand |Quan |Pryd |Pokud |Pokiaľ |Però |Pero |Pak |Oraz |Onda |Ond |Oletetaan |Og |Och |O zaman |Når |När |Niin |Nhưng |N |Mutta |Men |Mas |Maka |Majd |Mais |Maar |Ma |Lorsque |Lorsqu\'|Kun |Kuid |Kui |Khi |Keď |Ketika |Když |Kai |Kada |Kad |Jeżeli |Ja |Ir |I CAN HAZ |I |Ha |Givet |Given y\'all |Given |Gitt |Gegeven |Gegeben sei |Fakat |Eğer ki |Etant donné |Et |Então |Entonces |Entao |En |Eeldades |E |Duota |Donat |Donada |Diyelim ki |Dengan |De |Dato |Dar |Dann |Dan |Dado |Dacă |Daca |DEN |Când |Cuando |Cho |Cept |Cand |But y\'all |But |Biết |Bet |BUT |Atunci |And y\'all |And |Ama |Als |Alors |Allora |Ali |Aleshores |Ale |Akkor |Aber |AN |A také |A |\* )'
tokens = {
'comments': [
- (r'#.*$', Comment),
+ (r'#.*$', Comment)
],
'multiline_descriptions' : [
(step_keywords_regexp, Keyword, "#pop"),
include('comments'),
- (r"(\s|.)", Name.Constant),
+ (r"(\s|.)", Name.Constant)
],
'multiline_descriptions_on_stack' : [
(step_keywords_regexp, Keyword, "#pop:2"),
include('comments'),
- (r"(\s|.)", Name.Constant),
+ (r"(\s|.)", Name.Constant)
],
'scenario_table_description': [
(r"\s+\|", Text, 'scenario_table_header'),
include('comments'),
- (r"(\s|.)", Name.Constant),
+ (r"(\s|.)", Name.Constant)
],
'scenario_table_header': [
(r"\s+\|\s*$", Text, "#pop:2"),
(r"(\s+\|\s*)(#.*)$", bygroups(Text, Comment), "#pop:2"),
include('comments'),
(r"\s+\|", Text),
- (r"[^\|]", Name.Variable),
+ (r"[^\|]", Name.Variable)
],
'scenario_sections_on_stack': [
(scenario_keywords_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
- "multiline_descriptions_on_stack"),
+ "multiline_descriptions_on_stack")
],
'narrative': [
include('scenario_sections_on_stack'),
- (r"(\s|.)", Name.Builtin),
+ (r"(\s|.)", Name.Builtin)
],
'table_vars': [
- (r'(<[^>]*>)', bygroups(Name.Variable)),
+ (r'(<[^>]*>)', bygroups(Name.Variable))
],
'string': [
include('table_vars'),
@@ -2159,16 +2159,11 @@ class GherkinLexer(RegexLexer):
(r'"', String, "#pop"),
include('string'),
],
- 'single_string': [
- (r"'", String, "#pop"),
- include('string'),
- ],
'root': [
(r'\n', Text),
include('comments'),
(r'"""', String, "py_string"),
(r'"', String, "double_string"),
- (r"'", String, "single_string"),
include('table_vars'),
(r'@[^@\s]+', Name.Namespace),
(step_keywords_regexp, bygroups(Text, Keyword)),
@@ -2180,7 +2175,7 @@ class GherkinLexer(RegexLexer):
(examples_regexp,
bygroups(Text, Name.Class, Name.Class, Name.Constant),
"scenario_table_description"),
- (r'(\s|.)', Text),
+ (r'(\s|.)', Text)
]
}
diff --git a/pygments/lexers/parsers.py b/pygments/lexers/parsers.py
index 15d58eb..0ead39b 100644
--- a/pygments/lexers/parsers.py
+++ b/pygments/lexers/parsers.py
@@ -12,17 +12,15 @@
import re
from pygments.lexer import RegexLexer, DelegatingLexer, \
- include, bygroups, using, this
-from pygments.token import Error, Punctuation, Generic, Other, \
- Text, Comment, Operator, Keyword, Name, String, Number, Whitespace
+ include, bygroups, using
+from pygments.token import Punctuation, Other, Text, Comment, Operator, \
+ Keyword, Name, String, Number, Whitespace
from pygments.lexers.compiled import JavaLexer, CLexer, CppLexer, \
ObjectiveCLexer, DLexer
from pygments.lexers.dotnet import CSharpLexer
from pygments.lexers.agile import RubyLexer, PythonLexer, PerlLexer
from pygments.lexers.web import ActionScriptLexer
-# Use TextLexer during development to just focus on one part of a delegating
-# lexer.
-from pygments.lexers.special import TextLexer
+
__all__ = ['RagelLexer', 'RagelEmbeddedLexer', 'RagelCLexer', 'RagelDLexer',
'RagelCppLexer', 'RagelObjectiveCLexer', 'RagelRubyLexer',
diff --git a/pygments/lexers/templates.py b/pygments/lexers/templates.py
index 49b843d..eb84745 100644
--- a/pygments/lexers/templates.py
+++ b/pygments/lexers/templates.py
@@ -10,10 +10,6 @@
"""
import re
-try:
- set
-except NameError:
- from sets import Set as set
from pygments.lexers.web import \
PhpLexer, HtmlLexer, XmlLexer, JavascriptLexer, CssLexer
@@ -38,7 +34,8 @@ __all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
'CheetahXmlLexer', 'CheetahJavascriptLexer',
- 'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer']
+ 'EvoqueLexer', 'EvoqueHtmlLexer', 'EvoqueXmlLexer',
+ 'ColdfusionLexer', 'ColdfusionHtmlLexer']
class ErbLexer(Lexer):
@@ -246,7 +243,7 @@ class DjangoLexer(RegexLexer):
(r'\.[a-zA-Z0-9_]+', Name.Variable),
(r':?"(\\\\|\\"|[^"])*"', String.Double),
(r":?'(\\\\|\\'|[^'])*'", String.Single),
- (r'([{}()\[\]+\-*/,:]|[><=]=?)', Operator),
+ (r'([{}()\[\]+\-*/,:~]|[><=]=?)', Operator),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
],
@@ -1294,3 +1291,97 @@ class EvoqueXmlLexer(DelegatingLexer):
def __init__(self, **options):
super(EvoqueXmlLexer, self).__init__(XmlLexer, EvoqueLexer,
**options)
+
+class ColdfusionLexer(RegexLexer):
+ """
+ Coldfusion statements
+ """
+ name = 'cfstatement'
+ aliases = ['cfs']
+ filenames = []
+ mimetypes = []
+ flags = re.IGNORECASE | re.MULTILINE
+
+ tokens = {
+ 'root': [
+ (r'//.*', Comment),
+ (r'\+\+|--', Operator),
+ (r'[-+*/^&=!]', Operator),
+ (r'<=|>=|<|>', Operator),
+ (r'mod\b', Operator),
+ (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
+ (r'\|\||&&', Operator),
+ (r'"', String.Double, 'string'),
+ # There is a special rule for allowing html in single quoted
+ # strings, evidently.
+ (r"'.*?'", String.Single),
+ (r'\d+', Number),
+ (r'(if|else|len|var|case|default|break|switch)\b', Keyword),
+ (r'([A-Za-z_$][A-Za-z0-9_.]*)\s*(\()', bygroups(Name.Function, Punctuation)),
+ (r'[A-Za-z_$][A-Za-z0-9_.]*', Name.Variable),
+ (r'[()\[\]{};:,.\\]', Punctuation),
+ (r'\s+', Text),
+ ],
+ 'string': [
+ (r'""', String.Double),
+ (r'#.+?#', String.Interp),
+ (r'[^"#]+', String.Double),
+ (r'#', String.Double),
+ (r'"', String.Double, '#pop'),
+ ],
+ }
+
+class ColdfusionMarkupLexer(RegexLexer):
+ """
+ Coldfusion markup only
+ """
+ name = 'Coldfusion'
+ aliases = ['cf']
+ filenames = []
+ mimetypes = []
+
+ tokens = {
+ 'root': [
+ (r'[^<]+', Other),
+ include('tags'),
+ (r'<[^<>]*', Other),
+ ],
+ 'tags': [
+ (r'(?s)<!---.*?--->', Comment.Multiline),
+ (r'(?s)<!--.*?-->', Comment),
+ (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
+ (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
+ bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
+ # negative lookbehind is for strings with embedded >
+ (r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
+ r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
+ r'mailpart|mail|header|content|zip|image|lock|argument|try|'
+ r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
+ bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
+ ],
+ 'cfoutput': [
+ (r'[^#<]+', Other),
+ (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
+ Punctuation)),
+ #(r'<cfoutput.*?>', Name.Builtin, '#push'),
+ (r'</cfoutput.*?>', Name.Builtin, '#pop'),
+ include('tags'),
+ (r'(?s)<[^<>]*', Other),
+ (r'#', Other),
+ ],
+ }
+
+
+class ColdfusionHtmlLexer(DelegatingLexer):
+ """
+ Coldfusion markup in html
+ """
+ name = 'Coldfusion HTML'
+ aliases = ['cfm']
+ filenames = ['*.cfm', '*.cfml', '*.cfc']
+ mimetypes = ['application/x-coldfusion']
+
+ def __init__(self, **options):
+ super(ColdfusionHtmlLexer, self).__init__(HtmlLexer, ColdfusionMarkupLexer,
+ **options)
+
diff --git a/pygments/lexers/text.py b/pygments/lexers/text.py
index 1d7f89f..6b22370 100644
--- a/pygments/lexers/text.py
+++ b/pygments/lexers/text.py
@@ -10,10 +10,6 @@
"""
import re
-try:
- set
-except NameError:
- from sets import Set as set
from bisect import bisect
from pygments.lexer import Lexer, LexerContext, RegexLexer, ExtendedRegexLexer, \
@@ -638,7 +634,8 @@ class RstLexer(RegexLexer):
tokens = {
'root': [
# Heading with overline
- (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)(.+)(\n)(\1)(\n)',
+ (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
+ r'(.+)(\n)(\1)(\n)',
bygroups(Generic.Heading, Text, Generic.Heading,
Text, Generic.Heading, Text)),
# Plain heading
@@ -658,24 +655,33 @@ class RstLexer(RegexLexer):
bygroups(Text, Number, using(this, state='inline'))),
(r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
bygroups(Text, Number, using(this, state='inline'))),
+ # Line blocks
+ (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
+ bygroups(Text, Operator, using(this, state='inline'))),
# Sourcecode directives
(r'^( *\.\.)(\s*)((?:source)?code)(::)([ \t]*)([^\n]+)'
r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*|)\n)+)',
_handle_sourcecode),
# A directive
- (r'^( *\.\.)(\s*)([\w-]+)(::)(?:([ \t]*)(.+))?',
- bygroups(Punctuation, Text, Operator.Word, Punctuation, Text, Keyword)),
+ (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
+ bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
+ using(this, state='inline'))),
# A reference target
(r'^( *\.\.)(\s*)([\w\t ]+:)(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
# A footnote target
(r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
+ # A substitution def
+ (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
+ bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
+ Punctuation, Text, using(this, state='inline'))),
# Comments
(r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
# Field list
- (r'^( *)(:.*?:)([ \t]+)(.*?)$', bygroups(Text, Name.Class, Text,
- Name.Function)),
+ (r'^( *)(:[a-zA-Z-]+:)(\s*)$', bygroups(Text, Name.Class, Text)),
+ (r'^( *)(:.*?:)([ \t]+)(.*?)$',
+ bygroups(Text, Name.Class, Text, Name.Function)),
# Definition list
(r'^([^ ].*(?<!::)\n)((?:(?: +.*)\n)+)',
bygroups(using(this, state='inline'), using(this, state='inline'))),
@@ -687,12 +693,13 @@ class RstLexer(RegexLexer):
'inline': [
(r'\\.', Text), # escape
(r'``', String, 'literal'), # code
- (r'(`)(.+?)(`__?)',
- bygroups(Punctuation, using(this), Punctuation)), # reference
- (r'(`.+?`)(:[a-zA-Z0-9-]+?:)?',
+ (r'(`.+?)(<.+?>)(`__?)', # reference with inline target
+ bygroups(String, String.Interpol, String)),
+ (r'`.+?`__?', String), # reference
+ (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
bygroups(Name.Variable, Name.Attribute)), # role
- (r'(:[a-zA-Z0-9-]+?:)(`.+?`)',
- bygroups(Name.Attribute, Name.Variable)), # user-defined role
+ (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
+ bygroups(Name.Attribute, Name.Variable)), # role (content first)
(r'\*\*.+?\*\*', Generic.Strong), # Strong emphasis
(r'\*.+?\*', Generic.Emph), # Emphasis
(r'\[.*?\]_', String), # Footnote or citation
@@ -1013,6 +1020,7 @@ class DebianControlLexer(RegexLexer):
(r'[}]', Text),
(r'[^,]$', Name.Function, '#pop'),
(r'([\+\.a-zA-Z0-9-][\s\n]*)', Name.Function),
+ (r'\[.*?\]', Name.Entity),
],
'depend_vers': [
(r'\),', Text, '#pop'),
@@ -1503,6 +1511,7 @@ class NginxConfLexer(RegexLexer):
(r'[^\s;#{}$]+', String), # catch all
(r'/[^\s;#]*', Name), # pathname
(r'\s+', Text),
+ (r'[$;]', Text), # leftover characters
],
}
diff --git a/pygments/lexers/web.py b/pygments/lexers/web.py
index 598611d..ec0b27b 100644
--- a/pygments/lexers/web.py
+++ b/pygments/lexers/web.py
@@ -10,21 +10,20 @@
"""
import re
-try:
- set
-except NameError:
- from sets import Set as set
-from pygments.lexer import RegexLexer, bygroups, using, include, this
+from pygments.lexer import RegexLexer, ExtendedRegexLexer, bygroups, using, \
+ include, this
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Punctuation
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
html_doctype_matches
+from pygments.lexers.agile import RubyLexer
__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'CssLexer',
'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
- 'MxmlLexer']
+ 'MxmlLexer', 'HaxeLexer', 'HamlLexer', 'SassLexer',
+ 'ObjectiveJLexer', 'CoffeeScriptLexer']
class JavascriptLexer(RegexLexer):
@@ -312,7 +311,7 @@ class CssLexer(RegexLexer):
r'list-style|margin-bottom|margin-left|margin-right|'
r'margin-top|margin|marker-offset|marks|max-height|max-width|'
r'min-height|min-width|opacity|orphans|outline|outline-color|'
- r'outline-style|outline-width|overflow|padding-bottom|'
+ r'outline-style|outline-width|overflow(?:-x|-y|)|padding-bottom|'
r'padding-left|padding-right|padding-top|padding|page|'
r'page-break-after|page-break-before|page-break-inside|'
r'pause-after|pause-before|pause|pitch|pitch-range|'
@@ -390,6 +389,229 @@ class CssLexer(RegexLexer):
}
+class ObjectiveJLexer(RegexLexer):
+ """
+ For Objective-J source code with preprocessor directives.
+
+ *New in Pygments 1.3.*
+ """
+
+ name = 'Objective-J'
+ aliases = ['objective-j', 'objectivej', 'obj-j', 'objj']
+ filenames = ['*.j']
+ mimetypes = ['text/x-objective-j']
+
+ #: optional Comment or Whitespace
+ _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)*'
+
+ flags = re.DOTALL | re.MULTILINE
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+
+ # function definition
+ (r'^(' + _ws + r'[\+-]' + _ws + r')([\(a-zA-Z_].*?[^\(])(' + _ws + '{)',
+ bygroups(using(this), using(this, state='function_signature'),
+ using(this))),
+
+ # class definition
+ (r'(@interface|@implementation)(\s+)', bygroups(Keyword, Text),
+ 'classname'),
+ (r'(@class|@protocol)(\s*)', bygroups(Keyword, Text),
+ 'forward_classname'),
+ (r'(\s*)(@end)(\s*)', bygroups(Text, Keyword, Text)),
+
+ include('statements'),
+ ('[{\(\)}]', Punctuation),
+ (';', Punctuation),
+ ],
+ 'whitespace': [
+ (r'(@import)(\s+)("(\\\\|\\"|[^"])*")',
+ bygroups(Comment.Preproc, Text, String.Double)),
+ (r'(@import)(\s+)(<(\\\\|\\>|[^>])*>)',
+ bygroups(Comment.Preproc, Text, String.Double)),
+ (r'(#(?:include|import))(\s+)("(\\\\|\\"|[^"])*")',
+ bygroups(Comment.Preproc, Text, String.Double)),
+ (r'(#(?:include|import))(\s+)(<(\\\\|\\>|[^>])*>)',
+ bygroups(Comment.Preproc, Text, String.Double)),
+
+ (r'#if\s+0', Comment.Preproc, 'if0'),
+ (r'#', Comment.Preproc, 'macro'),
+
+ (r'\n', Text),
+ (r'\s+', Text),
+ (r'\\\n', Text), # line continuation
+ (r'//(\n|(.|\n)*?[^\\]\n)', Comment.Single),
+ (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
+ (r'<!--', Comment),
+ ],
+ 'slashstartsregex': [
+ include('whitespace'),
+ (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+ r'([gim]+\b|\B)', String.Regex, '#pop'),
+ (r'(?=/)', Text, ('#pop', 'badregex')),
+ (r'', Text, '#pop'),
+ ],
+ 'badregex': [
+ ('\n', Text, '#pop'),
+ ],
+ 'statements': [
+ (r'(L|@)?"', String, 'string'),
+ (r"(L|@)?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'",
+ String.Char),
+ (r'"(\\\\|\\"|[^"])*"', String.Double),
+ (r"'(\\\\|\\'|[^'])*'", String.Single),
+ (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
+ (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
+ (r'0x[0-9a-fA-F]+[Ll]?', Number.Hex),
+ (r'0[0-7]+[Ll]?', Number.Oct),
+ (r'\d+[Ll]?', Number.Integer),
+
+ (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
+
+ (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
+ r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?',
+ Operator, 'slashstartsregex'),
+ (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+ (r'[})\].]', Punctuation),
+
+ (r'(for|in|while|do|break|return|continue|switch|case|default|if|'
+ r'else|throw|try|catch|finally|new|delete|typeof|instanceof|void|'
+ r'prototype|__proto__)\b', Keyword, 'slashstartsregex'),
+
+ (r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
+
+ (r'(@selector|@private|@protected|@public|@encode|'
+ r'@synchronized|@try|@throw|@catch|@finally|@end|@property|'
+ r'@synthesize|@dynamic|@for|@accessors|new)\b', Keyword),
+
+ (r'(int|long|float|short|double|char|unsigned|signed|void|'
+ r'id|BOOL|bool|boolean|IBOutlet|IBAction|SEL|@outlet|@action)\b',
+ Keyword.Type),
+
+ (r'(self|super)\b', Name.Builtin),
+
+ (r'(TRUE|YES|FALSE|NO|Nil|nil|NULL)\b', Keyword.Constant),
+ (r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
+ (r'(ABS|ASIN|ACOS|ATAN|ATAN2|SIN|COS|TAN|EXP|POW|CEIL|FLOOR|ROUND|'
+ r'MIN|MAX|RAND|SQRT|E|LN2|LN10|LOG2E|LOG10E|PI|PI2|PI_2|SQRT1_2|'
+ r'SQRT2)\b', Keyword.Constant),
+
+ (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
+ r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
+ r'decodeURIComponent|encodeURI|encodeURIComponent|'
+ r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
+ r'window)\b', Name.Builtin),
+
+ (r'([$a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r')(?=\()',
+ bygroups(Name.Function, using(this))),
+
+ (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name),
+ ],
+ 'classname' : [
+ # interface definition that inherits
+ (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r':' + _ws +
+ r')([a-zA-Z_][a-zA-Z0-9_]*)?',
+ bygroups(Name.Class, using(this), Name.Class), '#pop'),
+ # interface definition for a category
+ (r'([a-zA-Z_][a-zA-Z0-9_]*)(' + _ws + r'\()([a-zA-Z_][a-zA-Z0-9_]*)(\))',
+ bygroups(Name.Class, using(this), Name.Label, Text), '#pop'),
+ # simple interface / implementation
+ (r'([a-zA-Z_][a-zA-Z0-9_]*)', Name.Class, '#pop'),
+ ],
+ 'forward_classname' : [
+ (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*,\s*)',
+ bygroups(Name.Class, Text), '#push'),
+ (r'([a-zA-Z_][a-zA-Z0-9_]*)(\s*;?)',
+ bygroups(Name.Class, Text), '#pop'),
+ ],
+ 'function_signature': [
+ include('whitespace'),
+
+ # start of a selector w/ parameters
+ (r'(\(' + _ws + r')' # open paren
+ r'([a-zA-Z_][a-zA-Z0-9_]+)' # return type
+ r'(' + _ws + r'\)' + _ws + r')' # close paren
+ r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
+ bygroups(using(this), Keyword.Type, using(this),
+ Name.Function), 'function_parameters'),
+
+ # no-param function
+ (r'(\(' + _ws + r')' # open paren
+ r'([a-zA-Z_][a-zA-Z0-9_]+)' # return type
+ r'(' + _ws + r'\)' + _ws + r')' # close paren
+ r'([$a-zA-Z_][a-zA-Z0-9_]+)', # function name
+ bygroups(using(this), Keyword.Type, using(this),
+ Name.Function), "#pop"),
+
+ # no return type given, start of a selector w/ parameters
+ (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
+ bygroups (Name.Function), 'function_parameters'),
+
+ # no return type given, no-param function
+ (r'([$a-zA-Z_][a-zA-Z0-9_]+)', # function name
+ bygroups(Name.Function), "#pop"),
+
+ ('', Text, '#pop'),
+ ],
+ 'function_parameters': [
+ include('whitespace'),
+
+ # parameters
+ (r'(\(' + _ws + ')' # open paren
+ r'([^\)]+)' # type
+ r'(' + _ws + r'\)' + _ws + r')+' # close paren
+ r'([$a-zA-Z_][a-zA-Z0-9_]+)', # param name
+ bygroups(using(this), Keyword.Type, using(this), Text)),
+
+ # one piece of a selector name
+ (r'([$a-zA-Z_][a-zA-Z0-9_]+' + _ws + r':)', # function name
+ Name.Function),
+
+ # smallest possible selector piece
+ (r'(:)', Name.Function),
+
+ # var args
+ (r'(,' + _ws + r'...)', using(this)),
+
+ # param name
+ (r'([$a-zA-Z_][a-zA-Z0-9_]+)', Text),
+ ],
+ 'expression' : [
+ (r'([$a-zA-Z_][a-zA-Z0-9_]*)(\()', bygroups(Name.Function,
+ Punctuation)),
+ (r'(\))', Punctuation, "#pop"),
+ ],
+ 'string': [
+ (r'"', String, '#pop'),
+ (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
+ (r'[^\\"\n]+', String), # all other characters
+ (r'\\\n', String), # line continuation
+ (r'\\', String), # stray backslash
+ ],
+ 'macro': [
+ (r'[^/\n]+', Comment.Preproc),
+ (r'/[*](.|\n)*?[*]/', Comment.Multiline),
+ (r'//.*?\n', Comment.Single, '#pop'),
+ (r'/', Comment.Preproc),
+ (r'(?<=\\)\n', Comment.Preproc),
+ (r'\n', Comment.Preproc, '#pop'),
+ ],
+ 'if0': [
+ (r'^\s*#if.*?(?<!\\)\n', Comment.Preproc, '#push'),
+ (r'^\s*#endif.*?(?<!\\)\n', Comment.Preproc, '#pop'),
+ (r'.*?\n', Comment),
+ ]
+ }
+
+ def analyse_text(text):
+ if re.search('^\s*@import\s+[<"]', text, re.MULTILINE):
+ # special directive found in most Objective-J files
+ return True
+ return False
+
+
class HtmlLexer(RegexLexer):
"""
For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
@@ -493,7 +715,7 @@ class PhpLexer(RegexLexer):
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
- (r'<<<([a-zA-Z_][a-zA-Z0-9_]*)\n.*?\n\1\;?\n', String),
+ (r'<<<(\'?)([a-zA-Z_][a-zA-Z0-9_]*)\1\n.*?\n\2\;?\n', String),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
@@ -507,6 +729,7 @@ class PhpLexer(RegexLexer):
(r'[~!%^&*+=|:.<>/?@-]+', Operator),
(r'[\[\]{}();,]+', Punctuation),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
+ (r'(function)(\s*)(?=\()', bygroups(Keyword, Text)),
(r'(function)(\s+)(&?)(\s*)',
bygroups(Keyword, Text, Operator, Text), 'functionname'),
(r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
@@ -520,11 +743,11 @@ class PhpLexer(RegexLexer):
r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
- r'catch|throw|this)\b', Keyword),
+ r'catch|throw|this|use|namespace)\b', Keyword),
('(true|false|null)\b', Keyword.Constant),
(r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
(r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
- ('[a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
+ (r'[\\a-zA-Z_][\\a-zA-Z0-9_]*', Name.Other),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
@@ -532,7 +755,7 @@ class PhpLexer(RegexLexer):
(r'"', String.Double, 'string'),
],
'classname': [
- (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
+ (r'[a-zA-Z_][\\a-zA-Z0-9_]*', Name.Class, '#pop')
],
'functionname': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
@@ -680,7 +903,6 @@ class XsltLexer(XmlLexer):
return 0.8
-
class MxmlLexer(RegexLexer):
"""
For MXML markup.
@@ -721,3 +943,677 @@ class MxmlLexer(RegexLexer):
(r'[^\s>]+', String, '#pop'),
],
}
+
+
+class HaxeLexer(RegexLexer):
+ """
+ For haXe source code (http://haxe.org/).
+ """
+
+ name = 'haXe'
+ aliases = ['hx', 'haXe']
+ filenames = ['*.hx']
+ mimetypes = ['text/haxe']
+
+ ident = r'(?:[a-zA-Z_][a-zA-Z0-9_]*)'
+ typeid = r'(?:(?:[a-z0-9_\.])*[A-Z_][A-Za-z0-9_]*)'
+ key_prop = r'(?:default|null|never)'
+ key_decl_mod = r'(?:public|private|override|static|inline|extern|dynamic)'
+
+ flags = re.DOTALL | re.MULTILINE
+
+ tokens = {
+ 'root': [
+ include('whitespace'),
+ include('comments'),
+ (key_decl_mod, Keyword.Declaration),
+ include('enumdef'),
+ include('typedef'),
+ include('classdef'),
+ include('imports'),
+ ],
+
+ # General constructs
+ 'comments': [
+ (r'//.*?\n', Comment.Single),
+ (r'/\*.*?\*/', Comment.Multiline),
+ (r'#[^\n]*', Comment.Preproc),
+ ],
+ 'whitespace': [
+ include('comments'),
+ (r'\s+', Text),
+ ],
+ 'codekeywords': [
+ (r'\b(if|else|while|do|for|in|break|continue|'
+ r'return|switch|case|try|catch|throw|null|trace|'
+ r'new|this|super|untyped|cast|callback|here)\b',
+ Keyword.Reserved),
+ ],
+ 'literals': [
+ (r'0[xX][0-9a-fA-F]+', Number.Hex),
+ (r'[0-9]+', Number.Integer),
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r"'(\\\\|\\'|[^'])*'", String.Single),
+ (r'"(\\\\|\\"|[^"])*"', String.Double),
+ (r'~/([^\n])*?/[gisx]*', String.Regex),
+ (r'\b(true|false|null)\b', Keyword.Constant),
+ ],
+ 'codeblock': [
+ include('whitespace'),
+ include('new'),
+ include('case'),
+ include('anonfundef'),
+ include('literals'),
+ include('vardef'),
+ include('codekeywords'),
+ (r'[();,\[\]]', Punctuation),
+ (r'(?:=|\+=|-=|\*=|/=|%=|&=|\|=|\^=|<<=|>>=|>>>=|\|\||&&|'
+ r'\.\.\.|==|!=|>|<|>=|<=|\||&|\^|<<|>>|>>>|\+|\-|\*|/|%|'
+ r'!|\+\+|\-\-|~|\.|\?|\:)',
+ Operator),
+ (ident, Name),
+
+ (r'}', Punctuation,'#pop'),
+ (r'{', Punctuation,'#push'),
+ ],
+
+ # Instance/Block level constructs
+ 'propertydef': [
+ (r'(\()(' + key_prop + ')(,)(' + key_prop + ')(\))',
+ bygroups(Punctuation, Keyword.Reserved, Punctuation,
+ Keyword.Reserved, Punctuation)),
+ ],
+ 'new': [
+ (r'\bnew\b', Keyword, 'typedecl'),
+ ],
+ 'case': [
+ (r'\b(case)(\s+)(' + ident + ')(\s*)(\()',
+ bygroups(Keyword.Reserved, Text, Name, Text, Punctuation),
+ 'funargdecl'),
+ ],
+ 'vardef': [
+ (r'\b(var)(\s+)(' + ident + ')',
+ bygroups(Keyword.Declaration, Text, Name.Variable), 'vardecl'),
+ ],
+ 'vardecl': [
+ include('whitespace'),
+ include('typelabel'),
+ (r'=', Operator,'#pop'),
+ (r';', Punctuation,'#pop'),
+ ],
+ 'instancevardef': [
+ (key_decl_mod,Keyword.Declaration),
+ (r'\b(var)(\s+)(' + ident + ')',
+ bygroups(Keyword.Declaration, Text, Name.Variable.Instance),
+ 'instancevardecl'),
+ ],
+ 'instancevardecl': [
+ include('vardecl'),
+ include('propertydef'),
+ ],
+
+ 'anonfundef': [
+ (r'\bfunction\b', Keyword.Declaration, 'fundecl'),
+ ],
+ 'instancefundef': [
+ (key_decl_mod, Keyword.Declaration),
+ (r'\b(function)(\s+)(' + ident + ')',
+ bygroups(Keyword.Declaration, Text, Name.Function), 'fundecl'),
+ ],
+ 'fundecl': [
+ include('whitespace'),
+ include('typelabel'),
+ include('generictypedecl'),
+ (r'\(',Punctuation,'funargdecl'),
+ (r'(?=[a-zA-Z0-9_])',Text,'#pop'),
+ (r'{',Punctuation,('#pop','codeblock')),
+ (r';',Punctuation,'#pop'),
+ ],
+ 'funargdecl': [
+ include('whitespace'),
+ (ident, Name.Variable),
+ include('typelabel'),
+ include('literals'),
+ (r'=', Operator),
+ (r',', Punctuation),
+ (r'\?', Punctuation),
+ (r'\)', Punctuation, '#pop'),
+ ],
+
+ 'typelabel': [
+ (r':', Punctuation, 'type'),
+ ],
+ 'typedecl': [
+ include('whitespace'),
+ (typeid, Name.Class),
+ (r'<', Punctuation, 'generictypedecl'),
+ (r'(?=[{}()=,a-z])', Text,'#pop'),
+ ],
+ 'type': [
+ include('whitespace'),
+ (typeid, Name.Class),
+ (r'<', Punctuation, 'generictypedecl'),
+ (r'->', Keyword.Type),
+ (r'(?=[{}(),;=])', Text, '#pop'),
+ ],
+ 'generictypedecl': [
+ include('whitespace'),
+ (typeid, Name.Class),
+ (r'<', Punctuation, '#push'),
+ (r'>', Punctuation, '#pop'),
+ (r',', Punctuation),
+ ],
+
+ # Top level constructs
+ 'imports': [
+ (r'(package|import|using)(\s+)([^;]+)(;)',
+ bygroups(Keyword.Namespace, Text, Name.Namespace,Punctuation)),
+ ],
+ 'typedef': [
+ (r'typedef', Keyword.Declaration, ('typedefprebody', 'typedecl')),
+ ],
+ 'typedefprebody': [
+ include('whitespace'),
+ (r'(=)(\s*)({)', bygroups(Punctuation, Text, Punctuation),
+ ('#pop', 'typedefbody')),
+ ],
+ 'enumdef': [
+ (r'enum', Keyword.Declaration, ('enumdefprebody', 'typedecl')),
+ ],
+ 'enumdefprebody': [
+ include('whitespace'),
+ (r'{', Punctuation, ('#pop','enumdefbody')),
+ ],
+ 'classdef': [
+ (r'class', Keyword.Declaration, ('classdefprebody', 'typedecl')),
+ ],
+ 'classdefprebody': [
+ include('whitespace'),
+ (r'(extends|implements)', Keyword.Declaration,'typedecl'),
+ (r'{', Punctuation, ('#pop', 'classdefbody')),
+ ],
+ 'interfacedef': [
+ (r'interface', Keyword.Declaration,
+ ('interfacedefprebody', 'typedecl')),
+ ],
+ 'interfacedefprebody': [
+ include('whitespace'),
+ (r'(extends)', Keyword.Declaration, 'typedecl'),
+ (r'{', Punctuation, ('#pop', 'classdefbody')),
+ ],
+
+ 'typedefbody': [
+ include('whitespace'),
+ include('instancevardef'),
+ include('instancefundef'),
+ (r'>', Punctuation, 'typedecl'),
+ (r',', Punctuation),
+ (r'}', Punctuation, '#pop'),
+ ],
+ 'enumdefbody': [
+ include('whitespace'),
+ (ident, Name.Variable.Instance),
+ (r'\(', Punctuation, 'funargdecl'),
+ (r';', Punctuation),
+ (r'}', Punctuation, '#pop'),
+ ],
+ 'classdefbody': [
+ include('whitespace'),
+ include('instancevardef'),
+ include('instancefundef'),
+ (r'}', Punctuation, '#pop'),
+ include('codeblock'),
+ ],
+ }
+
+ def analyse_text(text):
+ if re.match(r'\w+\s*:\s*\w', text): return 0.3
+
+
+def _indentation(lexer, match, ctx):
+ indentation = match.group(0)
+ yield match.start(), Text, indentation
+ ctx.last_indentation = indentation
+ ctx.pos = match.end()
+
+ if hasattr(ctx, 'block_state') and ctx.block_state and \
+ indentation.startswith(ctx.block_indentation) and \
+ indentation != ctx.block_indentation:
+ ctx.stack.append(ctx.block_state)
+ else:
+ ctx.block_state = None
+ ctx.block_indentation = None
+ ctx.stack.append('content')
+
+def _starts_block(token, state):
+ def callback(lexer, match, ctx):
+ yield match.start(), token, match.group(0)
+
+ if hasattr(ctx, 'last_indentation'):
+ ctx.block_indentation = ctx.last_indentation
+ else:
+ ctx.block_indentation = ''
+
+ ctx.block_state = state
+ ctx.pos = match.end()
+
+ return callback
+
+
+class HamlLexer(ExtendedRegexLexer):
+ """
+ For Haml markup.
+
+ *New in Pygments 1.3.*
+ """
+
+ name = 'Haml'
+ aliases = ['haml', 'HAML']
+ filenames = ['*.haml']
+ mimetypes = ['text/x-haml']
+
+ flags = re.IGNORECASE
+ # Haml can include " |\n" anywhere,
+ # which is ignored and used to wrap long lines.
+ # To accommodate this, use this custom faux dot instead.
+ _dot = r'(?: \|\n(?=.* \|)|.)'
+ tokens = {
+ 'root': [
+ (r'[ \t]*\n', Text),
+ (r'[ \t]*', _indentation),
+ ],
+
+ 'css': [
+ (r'\.[a-z0-9_:-]+', Name.Class, 'tag'),
+ (r'\#[a-z0-9_:-]+', Name.Function, 'tag'),
+ ],
+
+ 'eval-or-plain': [
+ (r'[&!]?==', Punctuation, 'plain'),
+ (r'([&!]?[=~])(' + _dot + '*\n)',
+ bygroups(Punctuation, using(RubyLexer)),
+ 'root'),
+ (r'', Text, 'plain'),
+ ],
+
+ 'content': [
+ include('css'),
+ (r'%[a-z0-9_:-]+', Name.Tag, 'tag'),
+ (r'!!!' + _dot + '*\n', Name.Namespace, '#pop'),
+ (r'(/)(\[' + _dot + '*?\])(' + _dot + '*\n)',
+ bygroups(Comment, Comment.Special, Comment),
+ '#pop'),
+ (r'/' + _dot + '*\n', _starts_block(Comment, 'html-comment-block'),
+ '#pop'),
+ (r'-#' + _dot + '*\n', _starts_block(Comment.Preproc,
+ 'haml-comment-block'), '#pop'),
+ (r'(-)(' + _dot + '*\n)',
+ bygroups(Punctuation, using(RubyLexer)),
+ '#pop'),
+ (r':' + _dot + '*\n', _starts_block(Name.Decorator, 'filter-block'),
+ '#pop'),
+ include('eval-or-plain'),
+ ],
+
+ 'tag': [
+ include('css'),
+ (r'\{(,\n|' + _dot + ')*?\}', using(RubyLexer)),
+ (r'\[' + _dot + '*?\]', using(RubyLexer)),
+ (r'\(', Text, 'html-attributes'),
+ (r'/[ \t]*\n', Punctuation, '#pop:2'),
+ (r'[<>]{1,2}(?=[ \t=])', Punctuation),
+ include('eval-or-plain'),
+ ],
+
+ 'plain': [
+ (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
+ (r'(#\{)(' + _dot + '*?)(\})',
+ bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
+ (r'\n', Text, 'root'),
+ ],
+
+ 'html-attributes': [
+ (r'\s+', Text),
+ (r'[a-z0-9_:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
+ (r'[a-z0-9_:-]+', Name.Attribute),
+ (r'\)', Text, '#pop'),
+ ],
+
+ 'html-attribute-value': [
+ (r'[ \t]+', Text),
+ (r'[a-z0-9_]+', Name.Variable, '#pop'),
+ (r'@[a-z0-9_]+', Name.Variable.Instance, '#pop'),
+ (r'\$[a-z0-9_]+', Name.Variable.Global, '#pop'),
+ (r"'(\\\\|\\'|[^'\n])*'", String, '#pop'),
+ (r'"(\\\\|\\"|[^"\n])*"', String, '#pop'),
+ ],
+
+ 'html-comment-block': [
+ (_dot + '+', Comment),
+ (r'\n', Text, 'root'),
+ ],
+
+ 'haml-comment-block': [
+ (_dot + '+', Comment.Preproc),
+ (r'\n', Text, 'root'),
+ ],
+
+ 'filter-block': [
+ (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
+ (r'(#\{)(' + _dot + '*?)(\})',
+ bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
+ (r'\n', Text, 'root'),
+ ],
+ }
+
+
+class SassLexer(ExtendedRegexLexer):
+ """
+ For Sass stylesheets.
+
+ *New in Pygments 1.3.*
+ """
+
+ name = 'Sass'
+ aliases = ['sass', 'SASS']
+ filenames = ['*.sass']
+ mimetypes = ['text/x-sass']
+
+ flags = re.IGNORECASE
+ tokens = {
+ 'root': [
+ (r'[ \t]*\n', Text),
+ (r'[ \t]*', _indentation),
+ ],
+
+ 'content': [
+ (r'//[^\n]*', _starts_block(Comment.Single, 'single-comment'),
+ 'root'),
+ (r'/\*[^\n]*', _starts_block(Comment.Multiline, 'multi-comment'),
+ 'root'),
+ (r'@import', Keyword, 'import'),
+ (r'@for', Keyword, 'for'),
+ (r'@(debug|if|while)', Keyword, 'script'),
+ (r'@[a-z0-9_-]+', Keyword, 'selector'),
+ (r'=[\w-]+', Name.Function, 'script'),
+ (r'\+[\w-]+', Name.Decorator, 'script'),
+ (r'(![a-z_]\w*)([ \t]*(?:\|\|)?=)',
+ bygroups(Name.Variable, Operator), 'script'),
+ (r':', Name.Attribute, 'old-style-attr'),
+ (r'(?=[^\s:"\[]+\s*[=:]([ \t]|$))', Name.Attribute, 'new-style-attr'),
+ (r'', Text, 'selector'),
+ ],
+
+ 'single-comment': [
+ (r'.+', Comment.Single),
+ (r'\n', Text, 'root'),
+ ],
+
+ 'multi-comment': [
+ (r'.+', Comment.Multiline),
+ (r'\n', Text, 'root'),
+ ],
+
+ 'import': [
+ (r'[ \t]+', Text),
+ (r'[^\s]+', String),
+ (r'\n', Text, 'root'),
+ ],
+
+ 'for': [
+ (r'(from|to|through)', Operator.Word),
+ include('script'),
+ ],
+
+ 'old-style-attr': [
+ (r'[^\s:="\[]+', Name.Attribute),
+ (r'#{', String.Interpol, 'interpolation'),
+ (r'[ \t]*=', Operator, 'script'),
+ (r'', Text, 'value'),
+ ],
+
+ 'new-style-attr': [
+ (r'[^\s:="\[]+', Name.Attribute),
+ (r'#{', String.Interpol, 'interpolation'),
+ (r'[ \t]*=', Operator, 'script'),
+ (r':', Name.Attribute, 'value'),
+ ],
+
+ 'value': [
+ (r'[ \t]+', Text),
+ (r'url\(', String.Other, 'string-url'),
+ (r'(azimuth|background-attachment|background-color|'
+ r'background-image|background-position|background-repeat|'
+ r'background|border-bottom-color|border-bottom-style|'
+ r'border-bottom-width|border-left-color|border-left-style|'
+ r'border-left-width|border-right|border-right-color|'
+ r'border-right-style|border-right-width|border-top-color|'
+ r'border-top-style|border-top-width|border-bottom|'
+ r'border-collapse|border-left|border-width|border-color|'
+ r'border-spacing|border-style|border-top|border|caption-side|'
+ r'clear|clip|color|content|counter-increment|counter-reset|'
+ r'cue-after|cue-before|cue|cursor|direction|display|'
+ r'elevation|empty-cells|float|font-family|font-size|'
+ r'font-size-adjust|font-stretch|font-style|font-variant|'
+ r'font-weight|font|height|letter-spacing|line-height|'
+ r'list-style-type|list-style-image|list-style-position|'
+ r'list-style|margin-bottom|margin-left|margin-right|'
+ r'margin-top|margin|marker-offset|marks|max-height|max-width|'
+ r'min-height|min-width|opacity|orphans|outline|outline-color|'
+ r'outline-style|outline-width|overflow|padding-bottom|'
+ r'padding-left|padding-right|padding-top|padding|page|'
+ r'page-break-after|page-break-before|page-break-inside|'
+ r'pause-after|pause-before|pause|pitch|pitch-range|'
+ r'play-during|position|quotes|richness|right|size|'
+ r'speak-header|speak-numeral|speak-punctuation|speak|'
+ r'speech-rate|stress|table-layout|text-align|text-decoration|'
+ r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
+ r'vertical-align|visibility|voice-family|volume|white-space|'
+ r'widows|width|word-spacing|z-index|bottom|left|'
+ r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
+ r'behind|below|bidi-override|blink|block|bold|bolder|both|'
+ r'capitalize|center-left|center-right|center|circle|'
+ r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
+ r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
+ r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
+ r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
+ r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
+ r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
+ r'inherit|inline-table|inline|inset|inside|invert|italic|'
+ r'justify|katakana-iroha|katakana|landscape|larger|large|'
+ r'left-side|leftwards|level|lighter|line-through|list-item|'
+ r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
+ r'lower|low|medium|message-box|middle|mix|monospace|'
+ r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
+ r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
+ r'open-quote|outset|outside|overline|pointer|portrait|px|'
+ r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
+ r'rightwards|s-resize|sans-serif|scroll|se-resize|'
+ r'semi-condensed|semi-expanded|separate|serif|show|silent|'
+ r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
+ r'spell-out|square|static|status-bar|super|sw-resize|'
+ r'table-caption|table-cell|table-column|table-column-group|'
+ r'table-footer-group|table-header-group|table-row|'
+ r'table-row-group|text|text-bottom|text-top|thick|thin|'
+ r'transparent|ultra-condensed|ultra-expanded|underline|'
+ r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
+ r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
+ r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Name.Constant),
+ (r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
+ r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
+ r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
+ r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
+ r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
+ r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
+ r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
+ r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
+ r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
+ r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
+ r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
+ r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
+ r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
+ r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
+ r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
+ r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
+ r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
+ r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
+ r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
+ r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
+ r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
+ r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
+ r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
+ r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
+ r'blueviolet|peachpuff)\b', Name.Entity),
+ (r'\!important', Name.Exception),
+ (r'/\*', Comment, 'inline-comment'),
+ (r'\#[a-z0-9]{1,6}', Number.Hex),
+ (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
+ (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
+ (r'#{', String.Interpol, 'interpolation'),
+ (r'[~\^\*!&%<>\|+=@:,./?-]+', Operator),
+ (r'[\[\]();]+', Punctuation),
+ (r'"', String.Double, 'string-double'),
+ (r"'", String.Single, 'string-single'),
+ (r'[a-z][\w-]*', Name),
+ (r'\n', Text, 'root'),
+ ],
+
+ 'script': [
+ (r'[ \t]+', Text),
+ (r'![\w_]+', Name.Variable),
+ (r'[+\-*/%=(),!><]', Operator),
+ (r'"', String.Double, 'string-double'),
+ (r"'", String.Single, 'string-single'),
+ (r'\#[a-z0-9]{1,6}', Number.Hex),
+ (r'(-?\d+)(\%|[a-z]+)?', bygroups(Number.Integer, Keyword.Type)),
+ (r'(-?\d*\.\d+)(\%|[a-z]+)?', bygroups(Number.Float, Keyword.Type)),
+ (r'(black|silver|gray|white|maroon|red|purple|fuchsia|green|'
+ r'lime|olive|yellow|navy|blue|teal|aqua)\b', Name.Builtin),
+ (r'(true|false)', Name.Pseudo),
+ (r'(and|or|not)', Operator.Word),
+ (r'(\\.|[^\s\\+*\/%(),=!])+(?=[ \t]*\()', Name.Function),
+ (r'(\\.|[^\s\\+*\/%(),=!])+', Name),
+ (r'\n', Text, 'root'),
+ ],
+
+ 'interpolation': [
+ (r'\}', String.Interpol, '#pop'),
+ include('script'),
+ ],
+
+ 'selector': [
+ (r'[ \t]+', Text),
+ (r'\:', Name.Decorator, 'pseudo-class'),
+ (r'\.', Name.Class, 'class'),
+ (r'\#', Name.Namespace, 'id'),
+ (r'[a-zA-Z0-9_-]+', Name.Tag),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'&', Keyword),
+ (r'[~\^\*!&\[\]\(\)<>\|+=@:;,./?-]', Operator),
+ (r'"', String.Double, 'string-double'),
+ (r"'", String.Single, 'string-single'),
+ (r'\n', Text, 'root'),
+ ],
+
+ 'string-double': [
+ (r'(\\.|#(?=[^\n{])|[^\n"#])+', String.Double),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'"', String.Double, '#pop'),
+ ],
+
+ 'string-single': [
+ (r"(\\.|#(?=[^\n{])|[^\n'#])+", String.Double),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r"'", String.Double, '#pop'),
+ ],
+
+ 'string-url': [
+ (r'(\\#|#(?=[^\n{])|[^\n#)])+', String.Other),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'\)', String.Other, '#pop'),
+ ],
+
+ 'inline-comment': [
+ (r"(\\#|#(?=[^\n{])|\*(?=[^\n/])|[^\n#*])+", Comment),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r"\*/", Comment, '#pop'),
+ ],
+
+ 'pseudo-class': [
+ (r'[\w-]+', Name.Decorator),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'', Text, '#pop'),
+ ],
+
+ 'class': [
+ (r'[\w-]+', Name.Class),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'', Text, '#pop'),
+ ],
+
+ 'id': [
+ (r'[\w-]+', Name.Namespace),
+ (r'#\{', String.Interpol, 'interpolation'),
+ (r'', Text, '#pop'),
+ ],
+ }
+
+
+class CoffeeScriptLexer(RegexLexer):
+ """
+ For `CoffeeScript`_ source code.
+
+ .. _CoffeeScript: http://jashkenas.github.com/coffee-script/
+
+ *New in Pygments 1.3.*
+ """
+
+ name = 'CoffeeScript'
+ aliases = ['coffee-script', 'coffeescript']
+ filenames = ['*.coffee']
+ mimetypes = ['text/coffeescript']
+
+ flags = re.DOTALL
+ tokens = {
+ 'commentsandwhitespace': [
+ (r'\s+', Text),
+ (r'#.*?\n', Comment.Single),
+ ],
+ 'slashstartsregex': [
+ include('commentsandwhitespace'),
+ (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
+ r'([gim]+\b|\B)', String.Regex, '#pop'),
+ (r'(?=/)', Text, ('#pop', 'badregex')),
+ (r'', Text, '#pop'),
+ ],
+ 'badregex': [
+ ('\n', Text, '#pop'),
+ ],
+ 'root': [
+ (r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
+ include('commentsandwhitespace'),
+ (r'\+\+|--|~|&&|\band\b|\bor\b|\bis\b|\bisnt\b|\bnot\b|\?|:|'
+ r'\|\||\\(?=\n)|(<<|>>>?|==?|!=?|[-<>+*`%&\|\^/])=?',
+ Operator, 'slashstartsregex'),
+ (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
+ (r'[})\].]', Punctuation),
+ (r'(for|in|of|while|break|return|continue|switch|when|then|if|else|'
+ r'throw|try|catch|finally|new|delete|typeof|instanceof|super|'
+ r'extends|this)\b', Keyword, 'slashstartsregex'),
+ (r'(true|false|yes|no|on|off|null|NaN|Infinity|undefined)\b',
+ Keyword.Constant),
+ (r'(Array|Boolean|Date|Error|Function|Math|netscape|'
+ r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
+ r'decodeURIComponent|encodeURI|encodeURIComponent|'
+ r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
+ r'window)\b', Name.Builtin),
+ (r'[$a-zA-Z_][a-zA-Z0-9_\.:]*:\s', Name.Variable,
+ 'slashstartsregex'),
+ (r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
+ (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
+ (r'0x[0-9a-fA-F]+', Number.Hex),
+ (r'[0-9]+', Number.Integer),
+ (r'"(\\\\|\\"|[^"])*"', String.Double),
+ (r"'(\\\\|\\'|[^'])*'", String.Single),
+ ]
+ }
diff --git a/pygments/token.py b/pygments/token.py
index f0a6541..4815ddf 100644
--- a/pygments/token.py
+++ b/pygments/token.py
@@ -8,11 +8,6 @@
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
-try:
- set
-except NameError:
- from sets import Set as set
-
class _TokenType(tuple):
parent = None
diff --git a/pygments/util.py b/pygments/util.py
index fc65b67..1bd1455 100644
--- a/pygments/util.py
+++ b/pygments/util.py
@@ -8,8 +8,10 @@
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
+
import re
import sys
+import codecs
split_path_re = re.compile(r'[/\\ ]')
@@ -206,6 +208,7 @@ if sys.version_info < (3,0):
import StringIO, cStringIO
BytesIO = cStringIO.StringIO
StringIO = StringIO.StringIO
+ uni_open = codecs.open
else:
import builtins
bytes = builtins.bytes
@@ -220,3 +223,4 @@ else:
import io
BytesIO = io.BytesIO
StringIO = io.StringIO
+ uni_open = builtins.open
diff --git a/scripts/find_error.py b/scripts/find_error.py
index a3cdad4..6b82fcd 100644..100755
--- a/scripts/find_error.py
+++ b/scripts/find_error.py
@@ -18,42 +18,148 @@ try:
import pygments
except ImportError:
# try parent path
- sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
+ sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
-from pygments import highlight
+
+from pygments.lexer import RegexLexer
from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
-from pygments.token import Error
+from pygments.token import Error, Text, _TokenType
+
+
+class DebuggingRegexLexer(RegexLexer):
+ """Make the state stack, position and current match instance attributes."""
+
+ def get_tokens_unprocessed(self, text, stack=('root',)):
+ """
+ Split ``text`` into (tokentype, text) pairs.
-def main(fn):
- try:
- lx = get_lexer_for_filename(fn)
- except ValueError:
+ ``stack`` is the initial stack (default: ``['root']``)
+ """
+ self.pos = 0
+ tokendefs = self._tokens
+ self.statestack = list(stack)
+ statetokens = tokendefs[self.statestack[-1]]
+ while 1:
+ for rexmatch, action, new_state in statetokens:
+ self.m = m = rexmatch(text, self.pos)
+ if m:
+ if type(action) is _TokenType:
+ yield self.pos, action, m.group()
+ else:
+ for item in action(self, m):
+ yield item
+ self.pos = m.end()
+ if new_state is not None:
+ # state transition
+ if isinstance(new_state, tuple):
+ for state in new_state:
+ if state == '#pop':
+ self.statestack.pop()
+ elif state == '#push':
+ self.statestack.append(self.statestack[-1])
+ else:
+ self.statestack.append(state)
+ elif isinstance(new_state, int):
+ # pop
+ del self.statestack[new_state:]
+ elif new_state == '#push':
+ self.statestack.append(self.statestack[-1])
+ else:
+ assert False, 'wrong state def: %r' % new_state
+ statetokens = tokendefs[self.statestack[-1]]
+ break
+ else:
+ try:
+ if text[self.pos] == '\n':
+ # at EOL, reset state to 'root'
+ self.pos += 1
+ self.statestack = ['root']
+ statetokens = tokendefs['root']
+ yield self.pos, Text, u'\n'
+ continue
+ yield self.pos, Error, text[self.pos]
+ self.pos += 1
+ except IndexError:
+ break
+
+
+def main(fn, lexer=None):
+ if lexer is not None:
+ lx = get_lexer_by_name(lexer)
+ else:
try:
- name, rest = fn.split("_", 1)
- lx = get_lexer_by_name(name)
+ lx = get_lexer_for_filename(os.path.basename(fn))
except ValueError:
- raise AssertionError('no lexer found for file %r' % fn)
+ try:
+ name, rest = fn.split('_', 1)
+ lx = get_lexer_by_name(name)
+ except ValueError:
+ raise AssertionError('no lexer found for file %r' % fn)
+ debug_lexer = False
+ # does not work for e.g. ExtendedRegexLexers
+ if lx.__class__.__bases__ == (RegexLexer,):
+ lx.__class__.__bases__ = (DebuggingRegexLexer,)
+ debug_lexer = True
+ lno = 1
text = file(fn, 'U').read()
text = text.strip('\n') + '\n'
text = text.decode('latin1')
- ntext = []
+ tokens = []
+ states = []
+
+ def show_token(tok, state):
+ reprs = map(repr, tok)
+ print ' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0],
+ if debug_lexer:
+ print ' ' + ' ' * (29-len(reprs[0])) + repr(state),
+ print
+
for type, val in lx.get_tokens(text):
+ lno += val.count('\n')
if type == Error:
- print "Error parsing", fn
- print "\n".join([' ' + repr(x) for x in ntext[-num:]])
- print `val` + "<<<"
+ print 'Error parsing', fn, 'on line', lno
+ print 'Previous tokens' + (debug_lexer and ' and states' or '') + ':'
+ if showall:
+ for tok, state in zip(tokens, states):
+ show_token(tok, state)
+ else:
+ for i in range(len(tokens) - num, len(tokens)):
+ show_token(tokens[i], states[i])
+ print 'Error token:'
+ l = len(repr(val))
+ print ' ' + repr(val),
+ if debug_lexer and hasattr(lx, 'statestack'):
+ print ' ' * (60-l) + repr(lx.statestack),
+ print
+ print
return 1
- ntext.append((type,val))
+ tokens.append((type,val))
+ if debug_lexer:
+ if hasattr(lx, 'statestack'):
+ states.append(lx.statestack[:])
+ else:
+ states.append(None)
+ if showall:
+ for tok, state in zip(tokens, states):
+ show_token(tok, state)
return 0
num = 10
+showall = False
+lexer = None
-if __name__ == "__main__":
- if sys.argv[1][:2] == '-n':
- num = int(sys.argv[1][2:])
- del sys.argv[1]
+if __name__ == '__main__':
+ import getopt
+ opts, args = getopt.getopt(sys.argv[1:], 'n:l:a')
+ for opt, val in opts:
+ if opt == '-n':
+ num = int(val)
+ elif opt == '-a':
+ showall = True
+ elif opt == '-l':
+ lexer = val
ret = 0
- for f in sys.argv[1:]:
- ret += main(f)
+ for f in args:
+ ret += main(f, lexer)
sys.exit(bool(ret))
diff --git a/setup.py b/setup.py
index 6a863b9..eda31b5 100755
--- a/setup.py
+++ b/setup.py
@@ -29,6 +29,7 @@
try:
from setuptools import setup, find_packages
+ have_setuptools = True
except ImportError:
from distutils.core import setup
def find_packages():
@@ -39,15 +40,27 @@ except ImportError:
'pygments.styles',
'pygments.filters',
]
+ have_setuptools = False
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
from distutils.command.build_py import build_py
+if have_setuptools:
+ add_keywords = dict(
+ entry_points = {
+ 'console_scripts': ['pygmentize = pygments.cmdline:main'],
+ },
+ )
+else:
+ add_keywords = dict(
+ scripts = ['pygmentize'],
+ )
+
setup(
name = 'Pygments',
- version = '1.2.2',
+ version = '1.3',
url = 'http://pygments.org/',
license = 'BSD License',
author = 'Georg Brandl',
@@ -56,7 +69,6 @@ setup(
long_description = __doc__,
keywords = 'syntax highlighting',
packages = find_packages(),
- scripts = ['pygmentize'],
platforms = 'any',
zip_safe = False,
include_package_data = True,
@@ -72,4 +84,5 @@ setup(
'Operating System :: OS Independent',
],
cmdclass = {'build_py': build_py},
+ **add_keywords
)
diff --git a/tests/examplefiles/CPDictionary.j b/tests/examplefiles/CPDictionary.j
new file mode 100755
index 0000000..50243f1
--- /dev/null
+++ b/tests/examplefiles/CPDictionary.j
@@ -0,0 +1,611 @@
+/*
+ * CPDictionary.j
+ * Foundation
+ *
+ * Created by Francisco Tolmasky.
+ * Copyright 2008, 280 North, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+//@import "CPRange.j"
+@import "CPObject.j"
+@import "CPEnumerator.j"
+@import "CPException.j"
+
+/* @ignore */
+@implementation _CPDictionaryValueEnumerator : CPEnumerator
+{
+ CPEnumerator _keyEnumerator;
+ CPDictionary _dictionary;
+}
+
+- (id)initWithDictionary:(CPDictionary)aDictionary
+{
+ self = [super init];
+
+ if (self)
+ {
+ _keyEnumerator = [aDictionary keyEnumerator];
+ _dictionary = aDictionary;
+ }
+
+ return self;
+}
+
+- (id)nextObject
+{
+ var key = [_keyEnumerator nextObject];
+
+ if (!key)
+ return nil;
+
+ return [_dictionary objectForKey:key];
+}
+
+@end
+
+/*!
+ @class CPDictionary
+ @ingroup foundation
+ @brief A mutable key-value pair collection.
+
+ A dictionary is the standard way of passing around key-value pairs in
+ the Cappuccino framework. It is similar to the
+ <a href="http://java.sun.com/javase/6/docs/api/index.html">Java map interface</a>,
+ except all keys are CPStrings and values can be any
+ Cappuccino or JavaScript object.
+
+ If you are familiar with dictionaries in Cocoa, you'll notice that
+ there is no CPMutableDictionary class. The regular CPDictionary
+ has \c -setObject:forKey: and \c -removeObjectForKey: methods.
+ In Cappuccino there is no distinction between immutable and mutable classes.
+ They are all mutable.
+*/
+@implementation CPDictionary : CPObject
+{
+}
+
+/*
+ @ignore
+*/
++ (id)alloc
+{
+ return new objj_dictionary();
+}
+
+/*!
+ Returns a new empty CPDictionary.
+*/
++ (id)dictionary
+{
+ return [[self alloc] init];
+}
+
+/*!
+ Returns a new dictionary, initialized with the contents of \c aDictionary.
+ @param aDictionary the dictionary to copy key-value pairs from
+ @return the new CPDictionary
+*/
++ (id)dictionaryWithDictionary:(CPDictionary)aDictionary
+{
+ return [[self alloc] initWithDictionary:aDictionary];
+}
+
+/*!
+ Creates a new dictionary with single key-value pair.
+ @param anObject the object for the pairing
+ @param aKey the key for the pairing
+ @return the new CPDictionary
+*/
++ (id)dictionaryWithObject:(id)anObject forKey:(id)aKey
+{
+ return [[self alloc] initWithObjects:[anObject] forKeys:[aKey]];
+}
+
+/*!
+ Creates a dictionary with multiple key-value pairs.
+ @param objects the objects to place in the dictionary
+ @param keys the keys for each of the objects
+ @throws CPInvalidArgumentException if the number of objects and keys is different
+ @return the new CPDictionary
+*/
++ (id)dictionaryWithObjects:(CPArray)objects forKeys:(CPArray)keys
+{
+ return [[self alloc] initWithObjects:objects forKeys:keys];
+}
+
+/*!
+ Creates a dictionary with multiple key-value pairs.
+ @param object the JavaScript object to create the dictionary from
+ @return the new CPDictionary
+*/
++ (id)dictionaryWithJSObject:(JSObject)object
+{
+ return [self dictionaryWithJSObject:object recursively:NO];
+}
+
+/*!
+ Creates a dictionary with multiple key-value pairs, recursively.
+ @param object the JavaScript object to create the dictionary from
+ @return the new CPDictionary
+*/
++ (id)dictionaryWithJSObject:(JSObject)object recursively:(BOOL)recursively
+{
+ var dictionary = [[self alloc] init];
+
+ for (var key in object)
+ {
+ var value = object[key];
+
+ if (recursively && value.constructor === Object)
+ value = [CPDictionary dictionaryWithJSObject:value recursively:YES];
+
+ [dictionary setObject:value forKey:key];
+ }
+
+ return dictionary;
+}
+
+/*!
+ Creates and returns a dictionary constructed by a given pairs of keys and values.
+ @param firstObject first object value
+ @param ... key for the first object and ongoing value-key pairs for more objects.
+ @throws CPInvalidArgumentException if the number of objects and keys is different
+ @return the new CPDictionary
+
+ Assuming that there's no object retaining in Cappuccino, you can create
+ dictionaries the same way as with alloc and initWithObjectsAndKeys:
+ var dict = [CPDictionary dictionaryWithObjectsAndKeys:
+ @"value1", @"key1",
+ @"value2", @"key2"];
+
+ Note, that there's no final nil like in Objective-C/Cocoa.
+
+ @see [CPDictionary initWithObjectsAndKeys:]
+*/
++ (id)dictionaryWithObjectsAndKeys:(id)firstObject, ...
+{
+ arguments[0] = [self alloc];
+ arguments[1] = @selector(initWithObjectsAndKeys:);
+
+ return objj_msgSend.apply(this, arguments);
+}
+
+/*!
+ Initializes the dictionary with the contents of another dictionary.
+ @param aDictionary the dictionary to copy key-value pairs from
+ @return the initialized dictionary
+*/
+- (id)initWithDictionary:(CPDictionary)aDictionary
+{
+ var key = "",
+ dictionary = [[CPDictionary alloc] init];
+
+ for (key in aDictionary._buckets)
+ [dictionary setObject:[aDictionary objectForKey:key] forKey:key];
+
+ return dictionary;
+}
+
+/*!
+ Initializes the dictionary from the arrays of keys and objects.
+ @param objects the objects to put in the dictionary
+ @param keyArray the keys for the objects to put in the dictionary
+ @throws CPInvalidArgumentException if the number of objects and keys is different
+ @return the initialized dictionary
+*/
+- (id)initWithObjects:(CPArray)objects forKeys:(CPArray)keyArray
+{
+ self = [super init];
+
+ if ([objects count] != [keyArray count])
+ [CPException raise:CPInvalidArgumentException reason:"Counts are different.("+[objects count]+"!="+[keyArray count]+")"];
+
+ if (self)
+ {
+ var i = [keyArray count];
+
+ while (i--)
+ [self setObject:objects[i] forKey:keyArray[i]];
+ }
+
+ return self;
+}
+
+/*!
+ Creates and returns a dictionary constructed by a given pairs of keys and values.
+ @param firstObject first object value
+ @param ... key for the first object and ongoing value-key pairs for more objects.
+ @throws CPInvalidArgumentException if the number of objects and keys is different
+ @return the new CPDictionary
+
+ You can create dictionaries this way:
+ var dict = [[CPDictionary alloc] initWithObjectsAndKeys:
+ @"value1", @"key1",
+ @"value2", @"key2"];
+
+ Note, that there's no final nil like in Objective-C/Cocoa.
+*/
+- (id)initWithObjectsAndKeys:(id)firstObject, ...
+{
+ var argCount = arguments.length;
+
+ if (argCount % 2 !== 0)
+ [CPException raise:CPInvalidArgumentException reason:"Key-value count is mismatched. (" + argCount + " arguments passed)"];
+
+ self = [super init];
+
+ if (self)
+ {
+ // The arguments array contains self and _cmd, so the first object is at position 2.
+ var index = 2;
+
+ for(; index < argCount; index += 2)
+ {
+ var value = arguments[index];
+
+ if (value === nil)
+ break;
+
+ [self setObject:value forKey:arguments[index + 1]];
+ }
+ }
+
+ return self;
+}
+
+/*!
+ return a copy of the receiver (does not deep copy the objects contained in the dictionary).
+*/
+- (CPDictionary)copy
+{
+ return [CPDictionary dictionaryWithDictionary:self];
+}
+
+/*!
+ Returns the number of entries in the dictionary
+*/
+- (int)count
+{
+ return count;
+}
+
+/*!
+ Returns an array of keys for all the entries in the dictionary.
+*/
+- (CPArray)allKeys
+{
+ return _keys;
+}
+
+/*!
+ Returns an array of values for all the entries in the dictionary.
+*/
+- (CPArray)allValues
+{
+ var index = _keys.length,
+ values = [];
+
+ while (index--)
+ values.push(dictionary_getValue(self, [_keys[index]]));
+
+ return values;
+}
+
+/*!
+ Returns an enumerator that enumerates over all the dictionary's keys.
+*/
+- (CPEnumerator)keyEnumerator
+{
+ return [_keys objectEnumerator];
+}
+
+/*!
+ Returns an enumerator that enumerates over all the dictionary's values.
+*/
+- (CPEnumerator)objectEnumerator
+{
+ return [[_CPDictionaryValueEnumerator alloc] initWithDictionary:self];
+}
+
+/*!
+ Compare the receiver to this dictionary, and return whether or not they are equal.
+*/
+- (BOOL)isEqualToDictionary:(CPDictionary)aDictionary
+{
+ if (count !== [aDictionary count])
+ return NO;
+
+ var index = count;
+ while (index--)
+ {
+ var currentKey = _keys[index],
+ lhsObject = _buckets[currentKey],
+ rhsObject = aDictionary._buckets[currentKey];
+
+ if (lhsObject === rhsObject)
+ continue;
+
+ if (lhsObject.isa && rhsObject.isa && [lhsObject respondsToSelector:@selector(isEqual:)] && [lhsObject isEqual:rhsObject])
+ continue;
+
+ return NO;
+ }
+
+ return YES;
+}
+
+/*
+ Instance.isEqualToDictionary(aDictionary)
+ {
+ if(this.count()!=aDictionary.count()) return NO;
+
+ var i= this._keys.count();
+ while(i--) if(this.objectForKey(this._keys[i])!=aDictionary.objectForKey(this._keys[i])) return NO;
+
+ return YES;
+ }
+
+ Instance.allKeys()
+ {
+ return this._keys;
+ }
+
+ Instance.allKeysForObject(anObject)
+ {
+ var i= 0,
+ keys= CPArray.array(),
+ count= this.count();
+
+ while((i= this._objects.indexOfObjectInRage(0, count-i))!=CPNotFound) keys.addObject(this._keys[i]);
+
+ return keys;
+ }
+
+ Instance.allValues()
+ {
+ return this._objects;
+ }
+
+ Instance.keyEnumerator()
+ {
+ return this._keys.objectEnumerator();
+ }
+
+ Instance.keysSortedByValueUsingSelector(aSelector)
+ {
+ var dictionary= this,
+ objectSelector= function(rhs)
+ {
+ return aSelector.apply(dictionary.objectForKey(this), [dictionary.objectForKey(rhs)]);
+ };
+
+ return this._keys.sortedArrayUsingSelector(objectSelector);
+ }
+
+ Instance.objectEnumerator()
+ {
+ return this._objects.objectEnumerator();
+ }
+*/
+/*!
+ Returns the object for the entry with key \c aKey.
+ @param aKey the key for the object's entry
+ @return the object for the entry
+*/
+- (id)objectForKey:(CPString)aKey
+{
+ var object = _buckets[aKey];
+
+ return (object === undefined) ? nil : object;
+}
+/*
+ Instance.objectsForKeys(keys, aNotFoundMarker)
+ {
+ var i= keys.length,
+ objects= CPArray.array();
+
+ while(i--)
+ {
+ var object= this.objectForKey(keys[i]);
+ objects.addObject(object==nil?aNotFoundMarker:object);
+ }
+
+ return objects;
+ }
+
+ Instance.valueForKey(aKey)
+ {
+ if(aKey.length && aKey[0]=="@") return this.objectForKey(aKey.substr(1));
+
+ return base.valueForKey(aKey);
+ }
+
+ //
+
+ Instance.addEntriesFromDictionary(aDictionary)
+ {
+ var key,
+ keyEnumerator= aDictionary.keyEnumerator();
+
+ while(key= keyEnumerator.nextObject()) this.setObjectForKey(aDictionary.objectForKey(key), key);
+ }
+*/
+/*!
+ Removes all the entries from the dictionary.
+*/
+- (void)removeAllObjects
+{
+ _keys = [];
+ count = 0;
+ _buckets = {};
+}
+
+/*!
+ Removes the entry for the specified key.
+ @param aKey the key of the entry to be removed
+*/
+- (void)removeObjectForKey:(id)aKey
+{
+ dictionary_removeValue(self, aKey);
+}
+
+/*!
+ Removes each entry in allKeys from the receiver.
+ @param allKeys an array of keys that will be removed from the dictionary
+*/
+- (void)removeObjectsForKeys:(CPArray)allKeys
+{
+ var index = allKeys.length;
+
+ while (index--)
+ dictionary_removeValue(self, allKeys[index]);
+}
+
+/*
+ Instance.removeObjectForKey(aKey)
+ {
+ var entry= this._dictionary[aKey];
+
+ if(entry)
+ {
+ var range= CPMakeRange(entry.index, 1);
+
+ this._keys.removeObjectsInRange(range);
+ this._objects.removeObjectsInRange(range);
+
+ delete this._dictionary[aKey];
+ }
+ }
+
+ Instance.setDictionary(aDictionary)
+ {
+ this._keys= CPArray.arrayWithArray(aDictionary.allKeys());
+ this._objects= CPArray.arrayWithArray(aDictionary.allValues());
+
+ this._dictionary= { };
+
+ var i= this._keys.count();
+ while(i--) this._dictionary[this._keys[i]]= { object: this._objects[i], index: i };
+ }
+*/
+/*!
+ Adds an entry into the dictionary.
+ @param anObject the object for the entry
+ @param aKey the entry's key
+*/
+- (void)setObject:(id)anObject forKey:(id)aKey
+{
+ dictionary_setValue(self, aKey, anObject);
+}
+/*
+ Instance.setValueForKey(aValue, aKey)
+ {
+ if(!aValue) this.removeObjectForKey(aKey);
+ else this.setObjectForKey(aValue, aKey);
+ }
+
+ Instance.copy()
+ {
+ return CPDictionary.alloc().dictionaryWithDictionary(this);
+ }
+*/
+
+/*!
+ Take all the key/value pairs in aDictionary and apply them to this dictionary.
+*/
+- (void)addEntriesFromDictionary:(CPDictionary)aDictionary
+{
+ if (!aDictionary)
+ return;
+
+ var keys = [aDictionary allKeys],
+ index = [keys count];
+
+ while (index--)
+ {
+ var key = keys[index];
+
+ [self setObject:[aDictionary objectForKey:key] forKey:key];
+ }
+}
+
+/*!
+ Returns a human readable description of the dictionary.
+*/
+- (CPString)description
+{
+ var description = @"CPDictionary {\n";
+
+ var i = _keys.length;
+
+ while (i--)
+ {
+ description += _keys[i] + ":";
+
+ var object = _buckets[_keys[i]];
+
+ if (object && object.isa)
+ description += [object description];
+ else
+ description += object;
+
+ description += "\n";
+ }
+
+ description += "}";
+
+ return description;
+}
+
+@end
+
+@implementation CPDictionary (CPCoding)
+
+/*
+ Initializes the dictionary by unarchiving the data from a coder.
+ @param aCoder the coder from which the data will be unarchived.
+ @return the initialized dictionary
+*/
+- (id)initWithCoder:(CPCoder)aCoder
+{
+ return [aCoder _decodeDictionaryOfObjectsForKey:@"CP.objects"];
+}
+
+/*!
+ Archives the dictionary to a provided coder.
+ @param aCoder the coder to which the dictionary data will be archived.
+*/
+- (void)encodeWithCoder:(CPCoder)aCoder
+{
+ [aCoder _encodeDictionaryOfObjects:self forKey:@"CP.objects"];
+}
+
+@end
+
+/*!
+ @class CPMutableDictionary
+ @ingroup compatability
+
+ This class is just an empty subclass of CPDictionary.
+ CPDictionary already implements mutable methods and
+ this class only exists for source compatibility.
+*/
+@implementation CPMutableDictionary : CPDictionary
+
+@end
+
+objj_dictionary.prototype.isa = CPDictionary;
diff --git a/tests/examplefiles/OrderedMap.hx b/tests/examplefiles/OrderedMap.hx
new file mode 100644
index 0000000..13b21f2
--- /dev/null
+++ b/tests/examplefiles/OrderedMap.hx
@@ -0,0 +1,584 @@
+package util;
+
+import util.Map;
+import util.Collection;
+import util.Set;
+import util.Option;
+import util.Debug;
+import util.Throwable;
+
+using util.StringFormat;
+
+/**
+ * An ordered map of (key,value) pairs. The key ordering is defined by
+ * a comparison function specified at construction. Duplicate keys
+ * are not allowed.
+ *
+ * Worst Case Time and Space Complexities:
+ * [operation] [time] [space]
+ * insert O(lg(n)) O(lg(n))
+ * find O(lg(n)) O(1)
+ * delete O(lg(n)) O(lg(n))
+ * range-query O(lg(n))* O(lg(n))
+ * iteration O(n)** O(lg(n))
+ * *range-query returns an iterator over elements in the range
+ * **total cost of iterating over the entire map
+ *
+ * The map is backed by a Left-Leaning Red-Black 2-3 Tree
+ * adapted from Robert Sedgewick (2008) (http://www.cs.princeton.edu/~rs/)
+ *
+ * Implementation choices (let size of tree be n)
+ * - Parent Pointers
+ * - This implementation omits parent pointers.
+ * - Omitting parent pointers saves n words of persistent memory
+ * at the expense of lg(n) stack space per operation.
+ * - Without parent pointers, most operations in the tree must
+ * either use recursion, or simulate recursion by saving a history
+ * of nodes via a stack. For example, each iterator will require
+ * lg(n) extra space to track progress through the tree. Insertions
+ * and deletions into the tree will also invalidate any existing
+ * iterators.
+ * - Node Size Information
+ * - This implementation omits the size of each node.
+ * - Omitting size information saves n words of long-term memory at
+ * the expense of not providing a find-kth operation.
+ * - This seems like a reasonable trade-off as range queries are
+ * generally more common than find-kth operations. The implementation
+ * used below could easily be modified to provide a version with
+ * size information should find-kth be of specific interest.
+ * - Recursive vs. Iterative
+ * - This implementation uses recursive algorithms.
+ * - The recursive implementations allow the code to remain compact and
+ * understandable. Since the height of LLRB 2-3 Trees is guaranteed
+ * to be at most 2lg(n), stack overflow is typically not a concern.
+ * Unlike the standard single-rotation red-black algorithm, LLRB
+ * operations are not tail-recursive, so even an iterative
+ * version will require lg(n) extra memory.
+ */
+class OrderedMap<K,V>
+{
+ private var root :Null<Node<K,V>>;
+ private var nodeCount :Int;
+ private var comp :K -> K -> Int;
+
+ public function new( keyComp :K -> K -> Int )
+ {
+ root = null;
+ comp = keyComp;
+ nodeCount = 0;
+ assertInvariants();
+ }
+
+ /**
+ * @returns Some(v) if (\key,v) is in the map, None otherwise.
+ */
+ public function get(key :K) :Option<V>
+ {
+ //normal BST search
+ var n = root;
+ while( n != null )
+ {
+ var cmp = comp(key,n.key);
+ if( cmp < 0 )
+ {
+ n = n.left;
+ }
+ else if ( cmp > 0 )
+ {
+ n = n.right;
+ }
+ else
+ {
+ return Some(n.val);
+ }
+ }
+ return None;
+ }
+
+ /**
+ * Puts (\key,\val) into the map or replaces the current value of \key
+ * with \val.
+ *
+ * @return None if \key currently is not in the map, or Some(v) if (\key,v)
+ * was in the map before the put operation.
+ */
+ public function set(key :K, val :V) :Option<V>
+ {
+ var ret = new Ref<V>(null);
+ root = insertNode(root,key,val,ret);
+ root.color = black;
+
+ assertInvariants();
+
+ if( ret.r == null )
+ {
+ return None;
+ }
+ return Some(ret.r);
+ }
+
+ private function insertNode(n :Node<K,V>, key :K, val :V, ret :Ref<V>)
+ {
+ //do the insertion at the leaf level
+ if( n == null )
+ {
+ ++nodeCount;
+ return new Node<K,V>(key,val);
+ }
+
+ //normal BST search
+ var cmp = comp(key,n.key);
+ if( cmp < 0 )
+ {
+ n.left = insertNode(n.left,key,val,ret);
+ }
+ else if( cmp > 0 )
+ {
+ n.right = insertNode(n.right,key,val,ret);
+ }
+ else
+ {
+ //the key is already in the map, update the value
+ ret.r = n.val;
+ n.val = val;
+ }
+
+ return fixInvariants(n);
+ }
+
+ /**
+ * Removes (\key,v) from the map if it exists.
+ *
+ * @return None if (\key,v) wasn't in the map, Some(v) otherwise.
+ */
+ public function remove(key :K) :Option<V>
+ {
+ var ret = new Ref<V>(null);
+ if( root != null )
+ {
+ root = deleteNode(root,key,ret);
+ if( root != null )
+ {
+ root.color = black;
+ }
+ }
+
+ assertInvariants();
+
+ if( ret.r == null )
+ {
+ return None;
+ }
+ return Some(ret.r);
+ }
+
+ private function deleteNode( n :Node<K,V>, key :K, ret :Ref<V> )
+ {
+ if( comp(key,n.key) < 0 )
+ {
+ if( isBlack(n.left) && isBlack(n.left.left) )
+ {
+ //ensure we move into a 3-node
+ n = moveRedLeft(n);
+ }
+ n.left = deleteNode(n.left,key,ret);
+ }
+ else
+ {
+ if( isRed(n.left) )
+ {
+ //ensure we move into a 3-node
+ n = rotateRight(n);
+ }
+ if( comp(key,n.key) == 0 && n.right == null )
+ {
+ //delete the node
+ ret.r = n.val;
+ --nodeCount;
+ return null;
+ }
+ if( isBlack(n.right) && isBlack(n.right.left) )
+ {
+ //ensure we move into a 3-node
+ n = moveRedRight(n);
+ }
+ if( comp(key,n.key) == 0 )
+ {
+ Debug.assert(n.right != null);
+
+ ret.r = n.val;
+
+ //ensure we are deleting a node with at most one child
+ var min = minNode(n.right);
+ n.val = min.val;
+ n.key = min.key;
+ n.right = deleteMinNode(n.right);
+ }
+ else
+ {
+ n.right = deleteNode(n.right,key,ret);
+ }
+ }
+
+ return fixInvariants(n);
+ }
+
+ /** returns a view of the set of keys in this TreeMap **/
+ public function keys() :SetView<K>
+ {
+ var _this = this;
+
+ return {
+ size: function() return _this.size(),
+ iterator: function() return IterTools.mapIter(new NodeIterator(_this.root),function(x) return x.key),
+ exists: function(x) {
+ return switch(_this.get(x))
+ {
+ case None: false;
+ case Some(_): true;
+ };
+ },
+ };
+ }
+
+ /** returns a view of the collection of values in this TreeMap **/
+ public function values() :CollectionView<V>
+ {
+ var _this = this;
+
+ return {
+ size: function() return _this.size(),
+ iterator: function() return IterTools.mapIter(new NodeIterator(_this.root),function(x) return x.val),
+ };
+ }
+
+ /** returns a view of the (key,value) pairs in this TreeMap **/
+ public function entries() :CollectionView<Entry<K,V>>
+ {
+ var _this = this;
+
+ return {
+ size: function() {
+ return _this.size();
+ },
+ iterator: function() {
+ return cast new NodeIterator(_this.root);
+ },
+ };
+ }
+
+ /** returns the number of (key,value) pairs in the map **/
+ public function size() :Int
+ {
+ return nodeCount;
+ }
+
+ public function toString() :String
+ {
+ var sb = new StringBuf();
+
+ sb.add("{");
+ for( entry in this.entries() )
+ {
+ sb.add("%y => %y, ".sprintf([entry.key,entry.val]));
+ }
+ sb.add("}");
+
+ return sb.toString();
+ }
+
+ private static function isRed<K,V>( n :Node<K,V> )
+ {
+ if( n == null ) return false;
+ return switch(n.color)
+ {
+ case red: true;
+ case black: false;
+ };
+ }
+
+ private static inline function isBlack<K,V>( n :Node<K,V> )
+ {
+ return !isRed(n);
+ }
+
+ private static function colorFlip<K,V>( n :Node<K,V> )
+ {
+ n.color = oppositeColor(n.color);
+ n.left.color = oppositeColor(n.left.color);
+ n.right.color = oppositeColor(n.right.color);
+ }
+
+ private static inline function oppositeColor( c :Color )
+ {
+ return switch(c)
+ {
+ case red: black;
+ case black: red;
+ };
+ }
+
+ private static function rotateLeft<K,V>( n :Node<K,V> )
+ {
+ Debug.assert(n != null);
+ Debug.assert(n.right != null);
+ /*
+ n x
+ / \ / \
+ a x => n c
+ / \ / \
+ b c a b
+ */
+ var x = n.right;
+ n.right = x.left;
+ x.left = n;
+ x.color = n.color;
+ n.color = red;
+ return x;
+ }
+
+ private static function rotateRight<K,V>( n :Node<K,V> )
+ {
+ Debug.assert( n != null );
+ Debug.assert( n.left != null );
+ /*
+ n x
+ / \ / \
+ x c => a n
+ / \ / \
+ a b b c
+ */
+ var x = n.left;
+ n.left = x.right;
+ x.right = n;
+ x.color = n.color;
+ n.color = red;
+ return x;
+ }
+
+ private static function moveRedLeft<K,V>( n :Node<K,V> )
+ {
+ //borrow extra node from right child (which is a 3-node)
+ colorFlip(n);
+ if( isRed(n.right.left) )
+ {
+ n.right = rotateRight(n.right);
+ n = rotateLeft(n);
+ colorFlip(n);
+ }
+ return n;
+ }
+
+ private static function moveRedRight<K,V>( n :Node<K,V> )
+ {
+ //borrow extra node from left child (which is a 3-node)
+ colorFlip(n);
+ if( isRed(n.left.left) )
+ {
+ n = rotateRight(n);
+ colorFlip(n);
+ }
+ return n;
+ }
+
+ private static function fixInvariants<K,V>( n :Node<K,V> )
+ {
+ if( isRed(n.right) && isBlack(n.left) )
+ {
+ //ensure left-leaning property
+ n = rotateLeft(n);
+ }
+ if( isRed(n.left) && isRed(n.left.left) )
+ {
+ //balance 4-node
+ n = rotateRight(n);
+ }
+ if( isRed(n.left) && isRed(n.right) )
+ {
+ //split 4-node
+ colorFlip(n);
+ }
+ return n;
+ }
+
+ private function deleteMinNode<K,V>( n :Node<K,V> )
+ {
+ if( n.left == null )
+ {
+ //delete
+ --nodeCount;
+ return null;
+ }
+
+ if( isBlack(n.left) && isBlack(n.left.left) )
+ {
+ n = moveRedLeft(n);
+ }
+
+ n.left = deleteMinNode(n.left);
+
+ return fixInvariants(n);
+ }
+
+ private static function minNode<K,V>( n :Node<K,V> )
+ {
+ Debug.assert(n != null);
+
+ while( n.left != null )
+ {
+ n = n.left;
+ }
+ return n;
+ }
+
+ private static function maxNode<K,V>( n :Node<K,V> )
+ {
+ Debug.assert(n != null);
+
+ while( n.right != null )
+ {
+ n = n.right;
+ }
+ return n;
+ }
+
+ /** Used to verify that the invariants of the tree hold **/
+ private inline function assertInvariants()
+ {
+ #if DEBUG
+ Debug.assert( isBlack(root), "root is black: " + root );
+
+ assertIsTree(root,new List<Node<K,V>>());
+ assertBlackNodeCount(root);
+ assertBSTOrdering(root,comp);
+ #end
+ }
+
+ private static function assertIsTree<K,V>( n: Node<K,V>, visited :List<Node<K,V>> )
+ {
+ if( n == null )
+ {
+ return;
+ }
+
+ for( r in visited )
+ {
+ Debug.assert( n != r );
+ }
+ visited.push(n);
+ assertIsTree(n.left,visited);
+ assertIsTree(n.right,visited);
+ }
+
+ private static function assertBlackNodeCount<K,V>( n: Node<K,V> ) :Int
+ {
+ if( n == null )
+ {
+ return 1;
+ }
+
+ var leftCount = assertBlackNodeCount(n.left);
+ var rightCount = assertBlackNodeCount(n.right);
+
+ Debug.assert(
+ leftCount == rightCount,
+ "num of black nodes in all paths for left and right child not equal" + n
+ );
+
+ return leftCount + switch(n.color) {
+ case red: 0;
+ case black: 1;
+ }
+ }
+
+ private static function assertBSTOrdering<K,V>( n: Node<K,V>, compK :K -> K -> Int ) :Void
+ {
+ if( n == null )
+ {
+ return;
+ }
+
+ if( n.left != null && n.left.val != null )
+ {
+ Debug.assert( compK(n.left.key,n.key) < 0, "left child not less than its parent" + n );
+ assertBSTOrdering(n.left,compK);
+ }
+
+ if( n.right != null && n.right.val != null )
+ {
+ Debug.assert( compK(n.key,n.right.key) < 0, "parent not less than its right child" + n );
+ assertBSTOrdering(n.right,compK);
+ }
+ }
+}
+
+private enum Color
+{
+ red;
+ black;
+}
+
+private class Node<K,V> /*implements Entry<K,V>*/
+{
+ public var left :Null<Node<K,V>>;
+ public var right :Null<Node<K,V>>;
+ public var color :Color;
+
+ public var key :K;
+ public var val :V;
+
+ public function new(k :K, v :V)
+ {
+ key = k;
+ val = v;
+ color = red;
+ }
+}
+
+private class NodeIterator<K,V>
+{
+ private var curr :Node<K,V>;
+ private var fringe :Array<Node<K,V>>;
+
+ public function new( root :Node<K,V> )
+ {
+ fringe = new Array<Node<K,V>>();
+ traverseToMin(root);
+ curr = fringe.pop();
+ }
+
+ public inline function hasNext() :Bool
+ {
+ return curr != null;
+ }
+
+ public function next() :Node<K,V>
+ {
+ if( !hasNext() )
+ {
+ throw new NoSuchElement();
+ }
+ var ret = curr;
+
+ if( fringe.length > 0 )
+ {
+ curr = fringe.pop();
+ traverseToMin(curr.right);
+ }
+ else
+ {
+ curr = null;
+ }
+
+ return ret;
+ }
+
+ private function traverseToMin( n :Node<K,V> )
+ {
+ while( n != null )
+ {
+ fringe.push(n);
+ n = n.left;
+ }
+ }
+} \ No newline at end of file
diff --git a/tests/examplefiles/Sorting.mod b/tests/examplefiles/Sorting.mod
new file mode 100644
index 0000000..d6a27c1
--- /dev/null
+++ b/tests/examplefiles/Sorting.mod
@@ -0,0 +1,470 @@
+IMPLEMENTATION MODULE Sorting;
+
+(* J. Andrea, Dec.16/91 *)
+(* This code may be freely used and distributed, it may not be sold. *)
+
+(* Adapted to ISO Modula-2 by Frank Schoonjans Feb 2004 *)
+
+FROM Storage IMPORT ALLOCATE;
+
+CONST
+ max_stack = 20;
+ n_small = 6; (* use a simple sort for this size and smaller *)
+
+VAR
+ rtemp :REAL;
+ ctemp :CARDINAL;
+
+ L, R, n :INTEGER;
+ top, bottom, lastflip :INTEGER;
+
+ tos :CARDINAL;
+ Lstack, Rstack :ARRAY [1..max_stack] OF INTEGER;
+
+ (* --------------------------------------------------- *)
+ PROCEDURE CardQSortIndex( x :ARRAY OF CARDINAL; array_len :CARDINAL;
+ VAR index :ARRAY OF CARDINAL );
+
+ VAR
+ median : CARDINAL;
+ i,j : INTEGER;
+ BEGIN
+
+ n := VAL(INTEGER,array_len) - 1; (* back to zero offset *)
+
+ (* initialize the index *)
+ FOR i := 0 TO n DO
+ index[i] := VAL(CARDINAL,i);
+ END;
+
+ tos := 0;
+
+ L := 0; R := n;
+
+ (* PUSH very first set *)
+ tos := tos + 1; Lstack[tos] := L; Rstack[tos] := R;
+
+ REPEAT
+
+ (* POP *)
+ L := Lstack[tos]; R := Rstack[tos]; tos := tos - 1;
+
+ IF R - L + 1 > n_small THEN
+
+ REPEAT
+ i := L; j := R; median := x[index[( L + R ) DIV 2]];
+
+ REPEAT
+ WHILE x[index[i]] < median DO
+ i := i + 1;
+ END;
+ WHILE median < x[index[j]] DO
+ j := j - 1;
+ END;
+
+ IF i <= j THEN (* swap *)
+ ctemp := index[i]; index[i] := index[j]; index[j] := ctemp;
+ i := i + 1; j := j - 1;
+ END;
+ UNTIL i > j;
+
+ IF j - L < R - i THEN
+ IF i < R THEN (* PUSH *)
+ tos := tos + 1; Lstack[tos] := i; Rstack[tos] := R;
+ END;
+ R := j;
+ ELSE
+ IF L < j THEN (* push *)
+ tos := tos + 1; Lstack[tos] := L; Rstack[tos] := j;
+ END;
+ L := i;
+ END;
+
+ UNTIL L >= R;
+
+ ELSE
+
+ (* small sort for small number of values *)
+ FOR i := L TO R - 1 DO
+ FOR j := i TO R DO
+ IF x[index[i]] > x[index[j]] THEN
+ ctemp := index[i];
+ index[i] := index[j];
+ index[j] := ctemp
+ END;
+ END;
+ END;
+
+ END; (* check for small *)
+
+ UNTIL tos = 0;
+
+ END CardQSortIndex;
+
+ (* --------------------------------------------------- *)
+ PROCEDURE RealQSortIndex( x :ARRAY OF REAL; array_len :CARDINAL;
+ VAR index :ARRAY OF CARDINAL );
+
+ VAR
+ median :REAL;
+ i,j :INTEGER;
+ BEGIN
+
+ n := VAL(INTEGER,array_len) - 1; (* back to zero offset *)
+
+ (* initialize the index *)
+ FOR i := 0 TO n DO
+ index[i] := VAL(CARDINAL,i);
+ END;
+
+ tos := 0;
+
+ L := 0; R := n;
+
+ (* PUSH very first set *)
+ tos := tos + 1; Lstack[tos] := L; Rstack[tos] := R;
+
+ REPEAT
+
+ (* POP *)
+ L := Lstack[tos]; R := Rstack[tos]; tos := tos - 1;
+
+ IF R - L + 1 > n_small THEN
+
+ REPEAT
+ i := L; j := R; median := x[index[( L + R ) DIV 2]];
+
+ REPEAT
+ WHILE x[index[i]] < median DO
+ i := i + 1;
+ END;
+ WHILE median < x[index[j]] DO
+ j := j - 1;
+ END;
+
+ IF i <= j THEN (* swap *)
+ ctemp := index[i]; index[i] := index[j]; index[j] := ctemp;
+ i := i + 1; j := j - 1;
+ END;
+ UNTIL i > j;
+
+ IF j - L < R - i THEN
+ IF i < R THEN (* PUSH *)
+ tos := tos + 1; Lstack[tos] := i; Rstack[tos] := R;
+ END;
+ R := j;
+ ELSE
+ IF L < j THEN (* push *)
+ tos := tos + 1; Lstack[tos] := L; Rstack[tos] := j;
+ END;
+ L := i;
+ END;
+
+ UNTIL L >= R;
+
+ ELSE
+
+ (* small sort for small number of values *)
+ FOR i := L TO R - 1 DO
+ FOR j := i TO R DO
+ IF x[index[i]] > x[index[j]] THEN
+ ctemp := index[i];
+ index[i] := index[j];
+ index[j] := ctemp
+ END;
+ END;
+ END;
+
+ END; (* check for small *)
+
+ UNTIL tos = 0;
+
+ END RealQSortIndex;
+
+ (* --------------------------------------------------- *)
+ PROCEDURE CardQSort( VAR x :ARRAY OF CARDINAL; array_len :CARDINAL );
+
+ VAR
+ median : CARDINAL;
+ n,i,j : INTEGER;
+ BEGIN
+
+ n := VAL(INTEGER,array_len) - 1; (* back to zero offset *)
+
+ tos := 0;
+
+ L := 0; R := n;
+
+ (* PUSH very first set *)
+ tos := tos + 1; Lstack[tos] := L; Rstack[tos] := R;
+
+ REPEAT
+
+ (* POP *)
+ L := Lstack[tos]; R := Rstack[tos]; tos := tos - 1;
+
+ IF R - L + 1 > n_small THEN
+
+ REPEAT
+ i := L; j := R; median := x[( L + R ) DIV 2];
+
+ REPEAT
+ WHILE x[i] < median DO
+ i := i + 1;
+ END;
+ WHILE median < x[j] DO
+ j := j - 1;
+ END;
+
+ IF i <= j THEN (* swap *)
+ ctemp := x[i]; x[i] := x[j]; x[j] := ctemp;
+ i := i + 1; j := j - 1;
+ END;
+ UNTIL i > j;
+
+ IF j - L < R - i THEN
+ IF i < R THEN (* PUSH *)
+ tos := tos + 1; Lstack[tos] := i; Rstack[tos] := R;
+ END;
+ R := j;
+ ELSE
+ IF L < j THEN (* push *)
+ tos := tos + 1; Lstack[tos] := L; Rstack[tos] := j;
+ END;
+ L := i;
+ END;
+
+ UNTIL L >= R;
+
+ ELSE
+
+ (* small sort for small number of values *)
+ FOR i := L TO R - 1 DO
+ FOR j := i TO R DO
+ IF x[i] > x[j] THEN
+ ctemp := x[i];
+ x[i] := x[j];
+ x[j] := ctemp
+ END;
+ END;
+ END;
+
+ END; (* check for small *)
+
+ UNTIL tos = 0;
+
+ END CardQSort;
+
+ (* ----------------------------------------------------- *)
+ PROCEDURE CardBSort( VAR x :ARRAY OF CARDINAL; array_len :CARDINAL );
+ VAR i,j : INTEGER;
+ BEGIN
+ top := 0; (* open arrays are zero offset *)
+ bottom := VAL(INTEGER,array_len) - 1;
+
+ WHILE top < bottom DO
+
+ lastflip := top;
+
+ FOR i := top TO bottom-1 DO
+ IF x[i] > x[i+1] THEN (* flip *)
+ ctemp := x[i];
+ x[i] := x[i+1];
+ x[i+1] := ctemp;
+ lastflip := i;
+ END;
+ END;
+
+ bottom := lastflip;
+
+ IF bottom > top THEN
+
+ i := bottom - 1;
+ FOR j := top TO bottom-1 DO
+ IF x[i] > x[i+1] THEN (* flip *)
+ ctemp := x[i];
+ x[i] := x[i+1];
+ x[i+1] := ctemp;
+ lastflip := i;
+ END;
+ i := i - 1;
+ END;
+
+ top := lastflip + 1;
+
+ ELSE
+ (* force a loop failure *)
+ top := bottom + 1;
+ END;
+
+ END;
+
+ END CardBSort;
+
+
+ (* ----------------------------------------------------- *)
+ PROCEDURE RealBSort( VAR x :ARRAY OF REAL; array_len :CARDINAL );
+ VAR bottom,top : INTEGER;
+ i,j : INTEGER;
+ BEGIN
+ top := 0; (* open arrays are zero offset *)
+ bottom := VAL(INTEGER,array_len) - 1;
+
+ WHILE top < bottom DO
+
+ lastflip := top;
+
+ FOR i := top TO bottom-1 DO
+ IF x[i] > x[i+1] THEN (* flip *)
+ rtemp := x[i];
+ x[i] := x[i+1];
+ x[i+1] := rtemp;
+ lastflip := i;
+ END;
+ END;
+
+ bottom := lastflip;
+
+ IF bottom > top THEN
+
+ i := bottom - 1;
+ FOR j := top TO bottom-1 DO
+ IF x[i] > x[i+1] THEN (* flip *)
+ rtemp := x[i];
+ x[i] := x[i+1];
+ x[i+1] := rtemp;
+ lastflip := i;
+ END;
+ i := i - 1;
+ END;
+
+ top := lastflip + 1;
+
+ ELSE
+ (* force a loop failure *)
+ top := bottom + 1;
+ END;
+
+ END;
+
+ END RealBSort;
+
+
+ (* ----------------------------------------------------- *)
+ PROCEDURE TopoSort( x, y :ARRAY OF CARDINAL; n_pairs :CARDINAL;
+ VAR solution :ARRAY OF CARDINAL; VAR n_solution :CARDINAL;
+ VAR error, sorted :BOOLEAN );
+ (*
+ This procedure needs some garbage collection added, I've tried but
+ with little success. J. Andrea, Dec.18/91
+ *)
+
+ TYPE
+ LPtr = POINTER TO Leader;
+ TPtr = POINTER TO Trailer;
+
+ Leader = RECORD
+ key :CARDINAL;
+ count :INTEGER;
+ trail :TPtr;
+ next :LPtr;
+ END;
+
+ Trailer = RECORD
+ id :LPtr;
+ next :TPtr;
+ END;
+
+ VAR
+ p, q, head, tail :LPtr;
+ t :TPtr;
+ i, max_solutions :CARDINAL;
+
+ (* -------------------------------------------- *)
+ PROCEDURE Find( w :CARDINAL ) :LPtr;
+ VAR h :LPtr;
+ BEGIN
+ h := head; tail^.key := w; (* sentinel *)
+ WHILE h^.key # w DO
+ h := h^.next;
+ END;
+ IF h = tail THEN
+ NEW( tail );
+ n := n + 1;
+ h^.count := 0;
+ h^.trail := NIL;
+ h^.next := tail;
+ END;
+ RETURN h;
+ END Find;
+
+ BEGIN
+
+ error := FALSE;
+ n_solution := 0;
+
+ IF n_pairs < 2 THEN
+ error := TRUE;
+ ELSE
+
+ max_solutions := HIGH( solution ) + 1;
+
+ NEW( head ); tail := head; n := 0;
+
+ (* add all of the given pairs *)
+
+ FOR i := 0 TO n_pairs - 1 DO
+ p := Find( x[i] ); q := Find( y[i] );
+ NEW(t);
+ t^.id := q;
+ t^.next := p^.trail;
+ p^.trail := t;
+ q^.count := q^.count + 1;
+ END;
+
+ (* search for leaders without predecessors *)
+
+ p := head; head := NIL;
+ WHILE p # tail DO
+ q := p; p := q^.next;
+ IF q^.count = 0 THEN
+ (* insert q^ in new chain *)
+ q^.next := head; head := q;
+ END;
+ END;
+
+ (* output phase *)
+
+ q := head;
+ WHILE ( NOT error ) & ( q # NIL ) DO
+ n_solution := n_solution + 1;
+ IF n_solution > max_solutions THEN
+ error := TRUE;
+ ELSE
+
+ solution[n_solution-1] := q^.key;
+ n := n - 1;
+ t := q^.trail; q := q^.next;
+ WHILE t # NIL DO
+ p := t^.id; p^.count := p^.count - 1;
+ IF p^.count = 0 THEN
+ (* insert p^ in leader list *)
+ p^.next := q; q := p;
+ END;
+ t := t^.next;
+ END;
+ END;
+ END;
+
+ IF n # 0 THEN
+ sorted := FALSE;
+ ELSE
+ sorted := TRUE;
+ END;
+
+ END;
+
+ END TopoSort;
+
+BEGIN
+END Sorting.
diff --git a/tests/examplefiles/example.aspx b/tests/examplefiles/aspx-cs_example
index 01de00e..01de00e 100644
--- a/tests/examplefiles/example.aspx
+++ b/tests/examplefiles/aspx-cs_example
diff --git a/tests/examplefiles/demo.cfm b/tests/examplefiles/demo.cfm
new file mode 100644
index 0000000..d94a06a
--- /dev/null
+++ b/tests/examplefiles/demo.cfm
@@ -0,0 +1,38 @@
+<!--- cfcomment --->
+<!-- html comment -->
+<html>
+<head>
+<title>Date Functions</title>
+</head>
+<body>
+<cfset RightNow = Now()>
+<cfoutput>
+ #RightNow#<br />
+ #DateFormat(RightNow)#<br />
+ #DateFormat(RightNow,"mm/dd/yy")#<br />
+ #TimeFormat(RightNow)#<br />
+ #TimeFormat(RightNow,"hh:mm tt")#<br />
+ #IsDate(RightNow)#<br />
+ #IsDate("January 31, 2007")#<br />
+ #IsDate("foo")#<br />
+ #DaysInMonth(RightNow)#
+</cfoutput>
+<cfoutput group="x">
+ #x#
+ <cfoutput>#y#</cfoutput>
+ #z#
+</cfoutput>
+</body>
+</html>
+
+<cfset person = "Paul">
+<cfset greeting = "Hello #person#">
+
+<cfset greeting = "Hello" & " world!">
+<cfset c = a^b>
+<cfset c = a MOD b>
+<cfset c = a / b>
+<cfset c = a * b>
+<cfset c = a + b>
+<cfset c = a - b>
+
diff --git a/tests/examplefiles/r-console-transcript.Rout b/tests/examplefiles/r-console-transcript.Rout
new file mode 100644
index 0000000..d0cf34b
--- /dev/null
+++ b/tests/examplefiles/r-console-transcript.Rout
@@ -0,0 +1,38 @@
+
+R version 2.9.2 (2009-08-24)
+Copyright (C) 2009 The R Foundation for Statistical Computing
+ISBN 3-900051-07-0
+
+R is free software and comes with ABSOLUTELY NO WARRANTY.
+You are welcome to redistribute it under certain conditions.
+Type 'license()' or 'licence()' for distribution details.
+
+ Natural language support but running in an English locale
+
+R is a collaborative project with many contributors.
+Type 'contributors()' for more information and
+'citation()' on how to cite R or R packages in publications.
+
+Type 'demo()' for some demos, 'help()' for on-line help, or
+'help.start()' for an HTML browser interface to help.
+Type 'q()' to quit R.
+
+[R.app GUI 1.29 (5464) i386-apple-darwin8.11.1]
+
+> x <- function {}
+Error: syntax error
+> x <- function() {}
+> x <- function() {
++ cat("hello")
++ cat("world")
++ }
+> x
+function() {
+cat("hello")
+cat("world")
+}
+> x()
+helloworld
+> 2 + 2
+[1] 4
+> \ No newline at end of file
diff --git a/tests/examplefiles/test.adb b/tests/examplefiles/test.adb
new file mode 100644
index 0000000..b79f3a5
--- /dev/null
+++ b/tests/examplefiles/test.adb
@@ -0,0 +1,211 @@
+-- Model IED Simulator
+-- COL Gene Ressler, 1 December 2007
+with Ada.Text_IO;
+
+with Ada.Characters.Latin_1;
+use Ada.Characters.Latin_1;
+
+with Ada.Strings.Fixed;
+use Ada.Strings.Fixed;
+
+with Ada.Strings;
+with Ada.Strings.Bounded;
+
+with Binary_Search;
+
+with Ada.Containers.Generic_Array_Sort;
+
+package body Scanner is
+
+ Constant_123 : constant Character := Character'Val (16#00#);
+ MAX_KEYWORD_LENGTH_C : constant Natural := 24;
+
+ New_Constant : constant New_Type
+ := 2;
+
+ KEYWORDS_C : constant Keyword_Array_T :=
+ (To_BS("description"),
+ To_BS("with"));
+
+ procedure Blah;
+
+ procedure blah is
+ begin
+
+ Declaration:
+ declare
+ Joe : Type_Type := Random;
+ begin
+ Do_Something;
+ end Declaration;
+ Loop_ID:
+ loop
+ Loop_Do;
+ exit when 1=2;
+ end loop Loop_ID;
+ if True or else False then
+ Do_This();
+ elsif not False and then True then
+ Do_That;
+ else
+ Panic;
+ end if;
+ end blah;
+
+ function "*" (Left, Right : in Integer) return Integer is
+ begin
+ <<Goto_Label>>
+ goto Goto_Label;
+ return Left + Right;
+ end "*";
+
+ function Function_Specification
+ (Param_1 : in Blah;
+ Param2, param3 : in access Blah_Type := 0)
+ return It_Type;
+
+ package Rename_Check renames Ada.Text_IO;
+
+ type New_Float is delta 0.001 digits 12;
+
+ package Package_Inst is new Ada.Strings.Bounded.Generic_Bounded_Length
+ (Max => MAX_KEYWORD_LENGTH_C);
+
+ type Array_Decl12 is array (Positive range <>) of SB.Bounded_String;
+ type Array_Decl3 is array (New_Type range Thing_1 .. Thing_2) of SB.Bounded_String;
+
+ type Boring_Type is
+ (Start,
+ End_Error);
+
+ subtype Sub_Type_check is Character range '0' .. '9';
+
+ Initialized_Array : constant Transistion_Array_T :=
+ (Start =>
+ (Letter_Lower | Letter_Upper => Saw_Alpha,
+ ' ' | HT | CR | LF => Start,
+ others => Begin_Error),
+
+ End_Error => (others => Start)
+
+ );
+
+ type Recorder is record
+ Advance : Boolean;
+ Return_Token : Token_T;
+ end record;
+
+ for Recorder use 8;
+
+ type Null_Record is null record;
+
+ type Discriminated_Record (Size : Natural) is
+ record
+ A : String (1 .. Size);
+ end record;
+
+ pragma Unchecked_Union (Union);
+ pragma Convention (C, Union);
+
+ type Person is tagged
+ record
+ Name : String (1 .. 10);
+ Gender : Gender_Type;
+ end record;
+
+ type Programmer is new Person with
+ record
+ Skilled_In : Language_List;
+ Favorite_Langauge : Python_Type;
+ end record;
+
+ type Programmer is new Person
+ and Printable
+ with
+ record
+ Skilled_In : Language_List;
+ Blah : aliased Integer;
+ end record;
+
+ ---------------------
+ -- Scan_Next_Token --
+ ---------------------
+
+ task Cyclic_Buffer_Task_Type is
+ entry Insert (An_Item : in Item);
+ entry Remove (An_Item : out Item);
+ end Cyclic_Buffer_Task_Type;
+
+ task body Cyclic_Buffer_Task_Type is
+ Q_Size : constant := 100;
+ subtype Q_Range is Positive range 1 .. Q_Size;
+ Length : Natural range 0 .. Q_Size := 0;
+ Head, Tail : Q_Range := 1;
+ Data : array (Q_Range) of Item;
+ begin
+ loop
+ select
+ when Length < Q_Size =>
+ accept Insert (An_Item : in Item) do
+ Data(Tail) := An_Item;
+ end Insert;
+ Tail := Tail mod Q_Size + 1;
+ Length := Length + 1;
+ or
+ when Length > 0 =>
+ accept Remove (An_Item : out Item) do
+ An_Item := Data(Head);
+ end Remove;
+ Head := Head mod Q_Size + 1;
+ Length := Length - 1;
+ end select;
+ end loop;
+ end Cyclic_Buffer_Task_Type;
+
+
+
+ procedure Scan_Next_Token
+ (S : in String;
+ Start_Index : out Positive;
+ End_Index : in out Natural; -- Tricky comment
+ Line_Number : in out Positive;
+ Token : out Token_T);
+
+ procedure Scan_Next_Token
+ (S : in String;
+ Start_Index : out Positive;
+ End_Index : in out Natural; -- Another comment
+ Line_Number : in out Positive;
+ Token : out Token_T)
+ is
+ begin
+ Scanner_Loop:
+ loop
+ if New_State = End_Error then
+ exit Scanner_Loop;
+ end if;
+
+ if State = Start and New_State /= Start then
+ Start_Index := Peek_Index;
+ end if;
+ end loop Scanner_Loop;
+ end Scan_Next_Token;
+
+ procedure Advance is
+ begin
+ Peek_Index := Peek_Index + 1;
+ end Advance;
+
+
+ -- Eliminate the leading space that Ada puts in front of positive
+ -- integer images.
+ function Image(N : in Integer) return String is
+ S : String := Integer'Image(N);
+ begin
+ if S(1) = ' ' then
+ return S(2 .. S'Last);
+ end if;
+ return S;
+ end Image;
+
+end Scanner;
diff --git a/tests/examplefiles/test.flx b/tests/examplefiles/test.flx
new file mode 100644
index 0000000..4c8a667
--- /dev/null
+++ b/tests/examplefiles/test.flx
@@ -0,0 +1,57 @@
+type tiny = "%i8";
+type int = "%i32";
+typedef bool = 2;
+fun add : int*int -> int = "%add";
+fun sub : int*int -> int = "%sub";
+fun eq : int*int -> bool = "%eq";
+fun lnot : bool -> bool = "%lnot";
+proc exit : int = "exit";
+
+// comment 1
+/*
+ /*
+ foo bar
+ */
+asdas
+*/
+
+noinline fun foo (x:int) = {
+ val y = 6;
+ return x + y;
+}
+
+noinline proc fake_exit (x:int) {
+ exit x;
+ return;
+}
+
+noinline fun bar (x:int) = {
+ var y = 10;
+ noinline proc baz () {
+ y = 20;
+ return;
+ }
+ baz ();
+ return x + y;
+}
+
+noinline fun x (a:int, b:int, c:tiny) = {
+ val x1 = a;
+ val x2 = b;
+ val x3 = c;
+ noinline fun y (d:int, e:int, f:tiny) = {
+ val y1 = x1;
+ val y2 = x2;
+ val y3 = f;
+ noinline fun z (g:int, h:int, i:tiny) = {
+ val z1 = x1;
+ val z2 = x2;
+ val z3 = i;
+ return z1;
+ }
+ return z (y1,y2,y3);
+ }
+ return y (x1,x2,x3);
+}
+
+fake_exit $ (foo 2) + (bar 3) + (x (1,2,3t));
diff --git a/tests/examplefiles/test.mod b/tests/examplefiles/test.mod
new file mode 100644
index 0000000..ba972e3
--- /dev/null
+++ b/tests/examplefiles/test.mod
@@ -0,0 +1,374 @@
+(* LIFO Storage Library
+ *
+ * @file LIFO.mod
+ * LIFO implementation
+ *
+ * Universal Dynamic Stack
+ *
+ * Author: Benjamin Kowarsch
+ *
+ * Copyright (C) 2009 Benjamin Kowarsch. All rights reserved.
+ *
+ * License:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met
+ *
+ * 1) NO FEES may be charged for the provision of the software. The software
+ * may NOT be published on websites that contain advertising, unless
+ * specific prior written permission has been obtained.
+ *
+ * 2) Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 3) Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and other materials provided with the distribution.
+ *
+ * 4) Neither the author's name nor the names of any contributors may be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * 5) Where this list of conditions or the following disclaimer, in part or
+ * as a whole is overruled or nullified by applicable law, no permission
+ * is granted to use the software.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ *)
+
+
+IMPLEMENTATION (* OF *) MODULE LIFO;
+
+FROM SYSTEM IMPORT ADDRESS, ADR, TSIZE;
+FROM Storage IMPORT ALLOCATE, DEALLOCATE;
+
+
+(* ---------------------------------------------------------------------------
+// Private type : ListEntry
+// ---------------------------------------------------------------------------
+*)
+TYPE ListPtr = POINTER TO ListEntry;
+
+TYPE ListEntry = RECORD
+ value : DataPtr;
+ next : ListPtr
+END; (* ListEntry *)
+
+
+(* ---------------------------------------------------------------------------
+// Opaque type : LIFO.Stack
+// ---------------------------------------------------------------------------
+// CAUTION: Modula-2 does not support the use of variable length array fields
+// in records. VLAs can only be implemented using pointer arithmetic which
+// means there is no type checking and no boundary checking on the array.
+// It also means that array notation cannot be used on the array which makes
+// the code difficult to read and maintain. As a result, Modula-2 is less
+// safe and less readable than C when it comes to using VLAs. Great care must
+// be taken to make sure that the code accessing VLA fields is safe. Boundary
+// checks must be inserted manually. Size checks must be inserted manually to
+// compensate for the absence of type checks. *)
+
+TYPE Stack = POINTER TO StackDescriptor;
+
+TYPE StackDescriptor = RECORD
+ overflow : ListPtr;
+ entryCount : StackSize;
+ arraySize : StackSize;
+ array : ADDRESS (* ARRAY OF DataPtr *)
+END; (* StackDescriptor *)
+
+
+(* ---------------------------------------------------------------------------
+// function: LIFO.new( initial_size, status )
+// ---------------------------------------------------------------------------
+//
+// Creates and returns a new LIFO stack object with an initial capacity of
+// <initialSize>. If zero is passed in for <initialSize>, then the stack
+// will be created with an initial capacity of LIFO.defaultStackSize. The
+// function fails if a value greater than LIFO.maximumStackSize is passed
+// in for <initialSize> or if memory could not be allocated.
+//
+// The initial capacity of a stack is the number of entries that can be stored
+// in the stack without enlargement.
+//
+// The status of the operation is passed back in <status>. *)
+
+PROCEDURE new ( initialSize : StackSize; VAR status : Status ) : Stack;
+
+VAR
+ newStack : Stack;
+
+BEGIN
+
+ (* zero size means default *)
+ IF initialSize = 0 THEN
+ initialSize := defaultStackSize;
+ END; (* IF *)
+
+ (* bail out if initial size is too high *)
+ IF initialSize > maximumStackSize THEN
+ status := invalidSize;
+ RETURN NIL;
+ END; (* IF *)
+
+ (* allocate new stack object *)
+ ALLOCATE(newStack, TSIZE(Stack) + TSIZE(DataPtr) * (initialSize - 1));
+
+ (* bail out if allocation failed *)
+ IF newStack = NIL THEN
+ status := allocationFailed;
+ RETURN NIL;
+ END; (* IF *)
+
+ (* initialise meta data *)
+ newStack^.arraySize := initialSize;
+ newStack^.entryCount := 0;
+ newStack^.overflow := NIL;
+
+ (* pass status and new stack to caller *)
+ status := success;
+ RETURN newStack
+
+END new;
+
+
+(* ---------------------------------------------------------------------------
+// function: LIFO.push( stack, value, status )
+// ---------------------------------------------------------------------------
+//
+// Adds a new entry <value> to the top of stack <stack>. The new entry is
+// added by reference, no data is copied. However, no entry is added if the
+// the stack is full, that is when the number of entries stored in the stack
+// has reached LIFO.maximumStackSize. The function fails if NIL is passed in
+// for <stack> or <value>, or if memory could not be allocated.
+//
+// New entries are allocated dynamically if the number of entries exceeds the
+// initial capacity of the stack.
+//
+// The status of the operation is passed back in <status>. *)
+
+PROCEDURE push ( VAR stack : Stack; value : DataPtr; VAR status : Status );
+VAR
+ newEntry : ListPtr;
+ valuePtr : POINTER TO DataPtr;
+
+BEGIN
+
+ (* bail out if stack is NIL *)
+ IF stack = NIL THEN
+ status := invalidStack;
+ RETURN;
+ END; (* IF *)
+
+ (* bail out if value is NIL *)
+ IF value = NIL THEN
+ status := invalidData;
+ RETURN;
+ END; (* IF *)
+
+ (* bail out if stack is full *)
+ IF stack^.entryCount >= maximumStackSize THEN
+ status := stackFull;
+ RETURN;
+ END; (* IF *)
+
+ (* check if index falls within array segment *)
+ IF stack^.entryCount < stack^.arraySize THEN
+
+ (* store value in array segment *)
+
+ (* stack^.array^[stack^.entryCount] := value; *)
+ valuePtr := ADR(stack^.array) + TSIZE(DataPtr) * stack^.entryCount;
+ valuePtr^ := value;
+
+ ELSE (* index falls within overflow segment *)
+
+ (* allocate new entry slot *)
+ NEW(newEntry);
+
+ (* bail out if allocation failed *)
+ IF newEntry = NIL THEN
+ status := allocationFailed;
+ RETURN;
+ END; (* IF *)
+
+ (* initialise new entry *)
+ newEntry^.value := value;
+
+ (* link new entry into overflow list *)
+ newEntry^.next := stack^.overflow;
+ stack^.overflow := newEntry;
+
+ END; (* IF *)
+
+ (* update entry counter *)
+ INC(stack^.entryCount);
+
+ (* pass status to caller *)
+ status := success;
+ RETURN
+
+END push;
+
+
+(* ---------------------------------------------------------------------------
+// function: LIFO.pop( stack, status )
+// ---------------------------------------------------------------------------
+//
+// Removes the top most value from stack <stack> and returns it. If the stack
+// is empty, that is when the number of entries stored in the stack has
+// reached zero, then NIL is returned.
+//
+// Entries which were allocated dynamically (above the initial capacity) are
+// deallocated when their values are popped.
+//
+// The status of the operation is passed back in <status>. *)
+
+PROCEDURE pop ( VAR stack : Stack; VAR status : Status ) : DataPtr;
+
+VAR
+ thisValue : DataPtr;
+ thisEntry : ListPtr;
+ valuePtr : POINTER TO DataPtr;
+
+BEGIN
+
+ (* bail out if stack is NIL *)
+ IF stack = NIL THEN
+ status := invalidStack;
+ RETURN NIL;
+ END; (* IF *)
+
+ (* bail out if stack is empty *)
+ IF stack^.entryCount = 0 THEN
+ status := stackEmpty;
+ RETURN NIL;
+ END; (* IF *)
+
+ DEC(stack^.entryCount);
+
+ (* check if index falls within array segment *)
+ IF stack^.entryCount < stack^.arraySize THEN
+
+ (* obtain value at index entryCount in array segment *)
+
+ (* thisValue := stack^.array^[stack^.entryCount]; *)
+ valuePtr := ADR(stack^.array) + TSIZE(DataPtr) * stack^.entryCount;
+ thisValue := valuePtr^;
+
+ ELSE (* index falls within overflow segment *)
+
+ (* obtain value of first entry in overflow list *)
+ thisValue := stack^.overflow^.value;
+
+ (* isolate first entry in overflow list *)
+ thisEntry := stack^.overflow;
+ stack^.overflow := stack^.overflow^.next;
+
+ (* remove the entry from overflow list *)
+ DISPOSE(thisEntry);
+
+ END; (* IF *)
+
+ (* return value and status to caller *)
+ status := success;
+ RETURN thisValue
+
+END pop;
+
+
+(* ---------------------------------------------------------------------------
+// function: LIFO.stackSize( stack )
+// ---------------------------------------------------------------------------
+//
+// Returns the current capacity of <stack>. The current capacity is the total
+// number of allocated entries. Returns zero if NIL is passed in for <stack>.
+*)
+PROCEDURE stackSize( VAR stack : Stack ) : StackSize;
+
+BEGIN
+
+ (* bail out if stack is NIL *)
+ IF stack = NIL THEN
+ RETURN 0;
+ END; (* IF *)
+
+ IF stack^.entryCount < stack^.arraySize THEN
+ RETURN stack^.arraySize;
+ ELSE
+ RETURN stack^.entryCount;
+ END; (* IF *)
+
+END stackSize;
+
+
+(* ---------------------------------------------------------------------------
+// function: LIFO.stackEntries( stack )
+// ---------------------------------------------------------------------------
+//
+// Returns the number of entries stored in stack <stack>, returns zero if
+// NIL is passed in for <stack>. *)
+
+PROCEDURE stackEntries( VAR stack : Stack ) : StackSize;
+
+BEGIN
+
+ (* bail out if stack is NIL *)
+ IF stack = NIL THEN
+ RETURN 0;
+ END; (* IF *)
+
+ RETURN stack^.entryCount
+
+END stackEntries;
+
+
+(* ---------------------------------------------------------------------------
+// function: LIFO.dispose( stack )
+// ---------------------------------------------------------------------------
+//
+// Disposes of LIFO stack object <stack>. Returns NIL. *)
+
+PROCEDURE dispose ( VAR stack : Stack ) : Stack;
+
+VAR
+ thisEntry : ListPtr;
+
+BEGIN
+
+ (* bail out if stack is NIL *)
+ IF stack = NIL THEN
+ RETURN NIL;
+ END; (* IF *)
+
+ (* deallocate any entries in stack's overflow list *)
+ WHILE stack^.overflow # NIL DO
+
+ (* isolate first entry in overflow list *)
+ thisEntry := stack^.overflow;
+ stack^.overflow := stack^.overflow^.next;
+
+ (* deallocate the entry *)
+ DISPOSE(thisEntry);
+
+ END; (* WHILE *)
+
+ (* deallocate stack object and pass NIL to caller *)
+ DEALLOCATE(stack, TSIZE(Stack) + TSIZE(DataPtr) * (stack^.arraySize - 1));
+ RETURN NIL
+
+END dispose;
+
+
+END LIFO.
diff --git a/tests/examplefiles/test.php b/tests/examplefiles/test.php
index 90de747..97e21f7 100644
--- a/tests/examplefiles/test.php
+++ b/tests/examplefiles/test.php
@@ -1,4 +1,7 @@
<?php
+
+$test = function($a) { $lambda = 1; }
+
/**
* Zip class file
*
@@ -12,7 +15,11 @@ if(!defined('UNLOCK') || !UNLOCK)
// Load the parent archive class
require_once(ROOT_PATH.'/classes/archive.class.php');
-
+
+class Zip\Zipp {
+
+}
+
/**
* Zip class
*
diff --git a/tests/examplefiles/underscore.coffee b/tests/examplefiles/underscore.coffee
new file mode 100644
index 0000000..a34a1ce
--- /dev/null
+++ b/tests/examplefiles/underscore.coffee
@@ -0,0 +1,603 @@
+
+ # Underscore.coffee
+ # (c) 2010 Jeremy Ashkenas, DocumentCloud Inc.
+ # Underscore is freely distributable under the terms of the MIT license.
+ # Portions of Underscore are inspired by or borrowed from Prototype.js,
+ # Oliver Steele's Functional, and John Resig's Micro-Templating.
+ # For all details and documentation:
+ # http://documentcloud.github.com/underscore/
+
+
+ # ------------------------- Baseline setup ---------------------------------
+
+ # Establish the root object, "window" in the browser, or "global" on the server.
+ root: this
+
+
+ # Save the previous value of the "_" variable.
+ previousUnderscore: root._
+
+
+ # If Underscore is called as a function, it returns a wrapped object that
+ # can be used OO-style. This wrapper holds altered versions of all the
+ # underscore functions. Wrapped objects may be chained.
+ wrapper: (obj) ->
+ this._wrapped: obj
+ this
+
+
+ # Establish the object that gets thrown to break out of a loop iteration.
+ breaker: if typeof(StopIteration) is 'undefined' then '__break__' else StopIteration
+
+
+  # Create a safe reference to the Underscore object for reference below.
+ _: root._: (obj) -> new wrapper(obj)
+
+
+ # Export the Underscore object for CommonJS.
+ if typeof(exports) != 'undefined' then exports._: _
+
+
+ # Create quick reference variables for speed access to core prototypes.
+ slice: Array::slice
+ unshift: Array::unshift
+ toString: Object::toString
+ hasOwnProperty: Object::hasOwnProperty
+ propertyIsEnumerable: Object::propertyIsEnumerable
+
+
+ # Current version.
+ _.VERSION: '0.5.7'
+
+
+ # ------------------------ Collection Functions: ---------------------------
+
+ # The cornerstone, an each implementation.
+ # Handles objects implementing forEach, arrays, and raw objects.
+ _.each: (obj, iterator, context) ->
+ index: 0
+ try
+ return obj.forEach(iterator, context) if obj.forEach
+ if _.isArray(obj) or _.isArguments(obj)
+ return iterator.call(context, obj[i], i, obj) for i in [0...obj.length]
+ iterator.call(context, val, key, obj) for key, val of obj
+ catch e
+ throw e if e isnt breaker
+ obj
+
+
+ # Return the results of applying the iterator to each element. Use JavaScript
+ # 1.6's version of map, if possible.
+ _.map: (obj, iterator, context) ->
+ return obj.map(iterator, context) if (obj and _.isFunction(obj.map))
+ results: []
+ _.each obj, (value, index, list) ->
+ results.push(iterator.call(context, value, index, list))
+ results
+
+
+ # Reduce builds up a single result from a list of values. Also known as
+ # inject, or foldl. Uses JavaScript 1.8's version of reduce, if possible.
+ _.reduce: (obj, memo, iterator, context) ->
+ return obj.reduce(_.bind(iterator, context), memo) if (obj and _.isFunction(obj.reduce))
+ _.each obj, (value, index, list) ->
+ memo: iterator.call(context, memo, value, index, list)
+ memo
+
+
+ # The right-associative version of reduce, also known as foldr. Uses
+ # JavaScript 1.8's version of reduceRight, if available.
+ _.reduceRight: (obj, memo, iterator, context) ->
+ return obj.reduceRight(_.bind(iterator, context), memo) if (obj and _.isFunction(obj.reduceRight))
+ _.each _.clone(_.toArray(obj)).reverse(), (value, index) ->
+ memo: iterator.call(context, memo, value, index, obj)
+ memo
+
+
+ # Return the first value which passes a truth test.
+ _.detect: (obj, iterator, context) ->
+ result: null
+ _.each obj, (value, index, list) ->
+ if iterator.call(context, value, index, list)
+ result: value
+ _.breakLoop()
+ result
+
+
+ # Return all the elements that pass a truth test. Use JavaScript 1.6's
+ # filter(), if it exists.
+ _.select: (obj, iterator, context) ->
+ if obj and _.isFunction(obj.filter) then return obj.filter(iterator, context)
+ results: []
+ _.each obj, (value, index, list) ->
+ results.push(value) if iterator.call(context, value, index, list)
+ results
+
+
+ # Return all the elements for which a truth test fails.
+ _.reject: (obj, iterator, context) ->
+ results: []
+ _.each obj, (value, index, list) ->
+ results.push(value) if not iterator.call(context, value, index, list)
+ results
+
+
+ # Determine whether all of the elements match a truth test. Delegate to
+ # JavaScript 1.6's every(), if it is present.
+ _.all: (obj, iterator, context) ->
+ iterator ||= _.identity
+ return obj.every(iterator, context) if obj and _.isFunction(obj.every)
+ result: true
+ _.each obj, (value, index, list) ->
+ _.breakLoop() unless (result: result and iterator.call(context, value, index, list))
+ result
+
+
+ # Determine if at least one element in the object matches a truth test. Use
+ # JavaScript 1.6's some(), if it exists.
+ _.any: (obj, iterator, context) ->
+ iterator ||= _.identity
+ return obj.some(iterator, context) if obj and _.isFunction(obj.some)
+ result: false
+ _.each obj, (value, index, list) ->
+ _.breakLoop() if (result: iterator.call(context, value, index, list))
+ result
+
+
+ # Determine if a given value is included in the array or object,
+ # based on '==='.
+ _.include: (obj, target) ->
+ return _.indexOf(obj, target) isnt -1 if _.isArray(obj)
+ for key, val of obj
+ return true if val is target
+ false
+
+
+ # Invoke a method with arguments on every item in a collection.
+ _.invoke: (obj, method) ->
+ args: _.rest(arguments, 2)
+ (if method then val[method] else val).apply(val, args) for val in obj
+
+
+ # Convenience version of a common use case of map: fetching a property.
+ _.pluck: (obj, key) ->
+ _.map(obj, ((val) -> val[key]))
+
+
+  # Return the maximum element (or element-based computation).
+ _.max: (obj, iterator, context) ->
+ return Math.max.apply(Math, obj) if not iterator and _.isArray(obj)
+ result: {computed: -Infinity}
+ _.each obj, (value, index, list) ->
+ computed: if iterator then iterator.call(context, value, index, list) else value
+ computed >= result.computed and (result: {value: value, computed: computed})
+ result.value
+
+
+ # Return the minimum element (or element-based computation).
+ _.min: (obj, iterator, context) ->
+ return Math.min.apply(Math, obj) if not iterator and _.isArray(obj)
+ result: {computed: Infinity}
+ _.each obj, (value, index, list) ->
+ computed: if iterator then iterator.call(context, value, index, list) else value
+ computed < result.computed and (result: {value: value, computed: computed})
+ result.value
+
+
+ # Sort the object's values by a criteria produced by an iterator.
+ _.sortBy: (obj, iterator, context) ->
+ _.pluck(((_.map obj, (value, index, list) ->
+ {value: value, criteria: iterator.call(context, value, index, list)}
+ ).sort((left, right) ->
+ a: left.criteria; b: right.criteria
+ if a < b then -1 else if a > b then 1 else 0
+ )), 'value')
+
+
+ # Use a comparator function to figure out at what index an object should
+ # be inserted so as to maintain order. Uses binary search.
+ _.sortedIndex: (array, obj, iterator) ->
+ iterator ||= _.identity
+ low: 0; high: array.length
+ while low < high
+ mid: (low + high) >> 1
+ if iterator(array[mid]) < iterator(obj) then low: mid + 1 else high: mid
+ low
+
+
+ # Convert anything iterable into a real, live array.
+ _.toArray: (iterable) ->
+ return [] if (!iterable)
+ return iterable.toArray() if (iterable.toArray)
+ return iterable if (_.isArray(iterable))
+ return slice.call(iterable) if (_.isArguments(iterable))
+ _.values(iterable)
+
+
+ # Return the number of elements in an object.
+ _.size: (obj) -> _.toArray(obj).length
+
+
+ # -------------------------- Array Functions: ------------------------------
+
+ # Get the first element of an array. Passing "n" will return the first N
+ # values in the array. Aliased as "head". The "guard" check allows it to work
+ # with _.map.
+ _.first: (array, n, guard) ->
+ if n and not guard then slice.call(array, 0, n) else array[0]
+
+
+ # Returns everything but the first entry of the array. Aliased as "tail".
+ # Especially useful on the arguments object. Passing an "index" will return
+ # the rest of the values in the array from that index onward. The "guard"
+ # check allows it to work with _.map.
+ _.rest: (array, index, guard) ->
+ slice.call(array, if _.isUndefined(index) or guard then 1 else index)
+
+
+ # Get the last element of an array.
+ _.last: (array) -> array[array.length - 1]
+
+
+ # Trim out all falsy values from an array.
+ _.compact: (array) -> array[i] for i in [0...array.length] when array[i]
+
+
+ # Return a completely flattened version of an array.
+ _.flatten: (array) ->
+ _.reduce array, [], (memo, value) ->
+ return memo.concat(_.flatten(value)) if _.isArray(value)
+ memo.push(value)
+ memo
+
+
+ # Return a version of the array that does not contain the specified value(s).
+ _.without: (array) ->
+ values: _.rest(arguments)
+ val for val in _.toArray(array) when not _.include(values, val)
+
+
+ # Produce a duplicate-free version of the array. If the array has already
+ # been sorted, you have the option of using a faster algorithm.
+ _.uniq: (array, isSorted) ->
+ memo: []
+ for el, i in _.toArray(array)
+ memo.push(el) if i is 0 || (if isSorted is true then _.last(memo) isnt el else not _.include(memo, el))
+ memo
+
+
+ # Produce an array that contains every item shared between all the
+ # passed-in arrays.
+ _.intersect: (array) ->
+ rest: _.rest(arguments)
+ _.select _.uniq(array), (item) ->
+ _.all rest, (other) ->
+ _.indexOf(other, item) >= 0
+
+
+ # Zip together multiple lists into a single array -- elements that share
+ # an index go together.
+ _.zip: ->
+ length: _.max(_.pluck(arguments, 'length'))
+ results: new Array(length)
+ for i in [0...length]
+ results[i]: _.pluck(arguments, String(i))
+ results
+
+
+ # If the browser doesn't supply us with indexOf (I'm looking at you, MSIE),
+  # we need this function. Return the position of the first occurrence of an
+ # item in an array, or -1 if the item is not included in the array.
+ _.indexOf: (array, item) ->
+ return array.indexOf(item) if array.indexOf
+ i: 0; l: array.length
+ while l - i
+ if array[i] is item then return i else i++
+ -1
+
+
+ # Provide JavaScript 1.6's lastIndexOf, delegating to the native function,
+ # if possible.
+ _.lastIndexOf: (array, item) ->
+ return array.lastIndexOf(item) if array.lastIndexOf
+ i: array.length
+ while i
+ if array[i] is item then return i else i--
+ -1
+
+
+ # Generate an integer Array containing an arithmetic progression. A port of
+ # the native Python range() function. See:
+ # http://docs.python.org/library/functions.html#range
+ _.range: (start, stop, step) ->
+ a: arguments
+ solo: a.length <= 1
+ i: start: if solo then 0 else a[0];
+ stop: if solo then a[0] else a[1];
+ step: a[2] or 1
+ len: Math.ceil((stop - start) / step)
+ return [] if len <= 0
+ range: new Array(len)
+ idx: 0
+ while true
+ return range if (if step > 0 then i - stop else stop - i) >= 0
+ range[idx]: i
+ idx++
+ i+= step
+
+
+ # ----------------------- Function Functions: -----------------------------
+
+ # Create a function bound to a given object (assigning 'this', and arguments,
+ # optionally). Binding with arguments is also known as 'curry'.
+ _.bind: (func, obj) ->
+ args: _.rest(arguments, 2)
+ -> func.apply(obj or root, args.concat(arguments))
+
+
+ # Bind all of an object's methods to that object. Useful for ensuring that
+ # all callbacks defined on an object belong to it.
+ _.bindAll: (obj) ->
+ funcs: if arguments.length > 1 then _.rest(arguments) else _.functions(obj)
+ _.each(funcs, (f) -> obj[f]: _.bind(obj[f], obj))
+ obj
+
+
+ # Delays a function for the given number of milliseconds, and then calls
+ # it with the arguments supplied.
+ _.delay: (func, wait) ->
+ args: _.rest(arguments, 2)
+ setTimeout((-> func.apply(func, args)), wait)
+
+
+ # Defers a function, scheduling it to run after the current call stack has
+ # cleared.
+ _.defer: (func) ->
+ _.delay.apply(_, [func, 1].concat(_.rest(arguments)))
+
+
+ # Returns the first function passed as an argument to the second,
+ # allowing you to adjust arguments, run code before and after, and
+ # conditionally execute the original function.
+ _.wrap: (func, wrapper) ->
+ -> wrapper.apply(wrapper, [func].concat(arguments))
+
+
+ # Returns a function that is the composition of a list of functions, each
+ # consuming the return value of the function that follows.
+ _.compose: ->
+ funcs: arguments
+ ->
+ args: arguments
+ for i in [(funcs.length - 1)..0]
+ args: [funcs[i].apply(this, args)]
+ args[0]
+
+
+ # ------------------------- Object Functions: ----------------------------
+
+ # Retrieve the names of an object's properties.
+ _.keys: (obj) ->
+ return _.range(0, obj.length) if _.isArray(obj)
+ key for key, val of obj
+
+
+ # Retrieve the values of an object's properties.
+ _.values: (obj) ->
+ _.map(obj, _.identity)
+
+
+ # Return a sorted list of the function names available in Underscore.
+ _.functions: (obj) ->
+ _.select(_.keys(obj), (key) -> _.isFunction(obj[key])).sort()
+
+
+ # Extend a given object with all of the properties in a source object.
+ _.extend: (destination, source) ->
+ for key, val of source
+ destination[key]: val
+ destination
+
+
+ # Create a (shallow-cloned) duplicate of an object.
+ _.clone: (obj) ->
+ return obj.slice(0) if _.isArray(obj)
+ _.extend({}, obj)
+
+
+ # Invokes interceptor with the obj, and then returns obj.
+ # The primary purpose of this method is to "tap into" a method chain, in order to perform operations on intermediate results within the chain.
+ _.tap: (obj, interceptor) ->
+ interceptor(obj)
+ obj
+
+
+ # Perform a deep comparison to check if two objects are equal.
+ _.isEqual: (a, b) ->
+ # Check object identity.
+ return true if a is b
+ # Different types?
+ atype: typeof(a); btype: typeof(b)
+ return false if atype isnt btype
+ # Basic equality test (watch out for coercions).
+ return true if `a == b`
+ # One is falsy and the other truthy.
+ return false if (!a and b) or (a and !b)
+ # One of them implements an isEqual()?
+ return a.isEqual(b) if a.isEqual
+ # Check dates' integer values.
+ return a.getTime() is b.getTime() if _.isDate(a) and _.isDate(b)
+ # Both are NaN?
+ return true if _.isNaN(a) and _.isNaN(b)
+ # Compare regular expressions.
+ if _.isRegExp(a) and _.isRegExp(b)
+ return a.source is b.source and
+ a.global is b.global and
+ a.ignoreCase is b.ignoreCase and
+ a.multiline is b.multiline
+ # If a is not an object by this point, we can't handle it.
+ return false if atype isnt 'object'
+ # Check for different array lengths before comparing contents.
+ return false if a.length and (a.length isnt b.length)
+ # Nothing else worked, deep compare the contents.
+ aKeys: _.keys(a); bKeys: _.keys(b)
+ # Different object sizes?
+ return false if aKeys.length isnt bKeys.length
+ # Recursive comparison of contents.
+ # for (var key in a) if (!_.isEqual(a[key], b[key])) return false;
+ return true
+
+
+ # Is a given array or object empty?
+ _.isEmpty: (obj) -> _.keys(obj).length is 0
+
+
+ # Is a given value a DOM element?
+ _.isElement: (obj) -> obj and obj.nodeType is 1
+
+
+ # Is a given value an array?
+ _.isArray: (obj) -> !!(obj and obj.concat and obj.unshift)
+
+
+ # Is a given variable an arguments object?
+ _.isArguments: (obj) -> obj and _.isNumber(obj.length) and not obj.concat and
+ not obj.substr and not obj.apply and not propertyIsEnumerable.call(obj, 'length')
+
+
+ # Is the given value a function?
+ _.isFunction: (obj) -> !!(obj and obj.constructor and obj.call and obj.apply)
+
+
+ # Is the given value a string?
+ _.isString: (obj) -> !!(obj is '' or (obj and obj.charCodeAt and obj.substr))
+
+
+ # Is a given value a number?
+ _.isNumber: (obj) -> (obj is +obj) or toString.call(obj) is '[object Number]'
+
+
+ # Is a given value a Date?
+ _.isDate: (obj) -> !!(obj and obj.getTimezoneOffset and obj.setUTCFullYear)
+
+
+ # Is the given value a regular expression?
+ _.isRegExp: (obj) -> !!(obj and obj.exec and (obj.ignoreCase or obj.ignoreCase is false))
+
+
+ # Is the given value NaN -- this one is interesting. NaN != NaN, and
+ # isNaN(undefined) == true, so we make sure it's a number first.
+ _.isNaN: (obj) -> _.isNumber(obj) and window.isNaN(obj)
+
+
+ # Is a given value equal to null?
+ _.isNull: (obj) -> obj is null
+
+
+ # Is a given variable undefined?
+ _.isUndefined: (obj) -> typeof obj is 'undefined'
+
+
+ # -------------------------- Utility Functions: --------------------------
+
+ # Run Underscore.js in noConflict mode, returning the '_' variable to its
+ # previous owner. Returns a reference to the Underscore object.
+ _.noConflict: ->
+ root._: previousUnderscore
+ this
+
+
+ # Keep the identity function around for default iterators.
+ _.identity: (value) -> value
+
+
+ # Break out of the middle of an iteration.
+ _.breakLoop: -> throw breaker
+
+
+ # Generate a unique integer id (unique within the entire client session).
+ # Useful for temporary DOM ids.
+ idCounter: 0
+ _.uniqueId: (prefix) ->
+ (prefix or '') + idCounter++
+
+
+ # By default, Underscore uses ERB-style template delimiters, change the
+ # following template settings to use alternative delimiters.
+ _.templateSettings: {
+ start: '<%'
+ end: '%>'
+ interpolate: /<%=(.+?)%>/g
+ }
+
+
+ # JavaScript templating a-la ERB, pilfered from John Resig's
+ # "Secrets of the JavaScript Ninja", page 83.
+  # Single-quote fix from Rick Strahl's version.
+ _.template: (str, data) ->
+ c: _.templateSettings
+ fn: new Function 'obj',
+ 'var p=[],print=function(){p.push.apply(p,arguments);};' +
+ 'with(obj){p.push(\'' +
+ str.replace(/[\r\t\n]/g, " ")
+ .replace(new RegExp("'(?=[^"+c.end[0]+"]*"+c.end+")","g"),"\t")
+ .split("'").join("\\'")
+ .split("\t").join("'")
+ .replace(c.interpolate, "',$1,'")
+ .split(c.start).join("');")
+ .split(c.end).join("p.push('") +
+ "');}return p.join('');"
+ if data then fn(data) else fn
+
+
+ # ------------------------------- Aliases ----------------------------------
+
+ _.forEach: _.each
+ _.foldl: _.inject: _.reduce
+ _.foldr: _.reduceRight
+ _.filter: _.select
+ _.every: _.all
+ _.some: _.any
+ _.head: _.first
+ _.tail: _.rest
+ _.methods: _.functions
+
+
+ # /*------------------------ Setup the OOP Wrapper: --------------------------*/
+
+ # Helper function to continue chaining intermediate results.
+ result: (obj, chain) ->
+ if chain then _(obj).chain() else obj
+
+
+ # Add all of the Underscore functions to the wrapper object.
+ _.each _.functions(_), (name) ->
+ method: _[name]
+ wrapper.prototype[name]: ->
+ unshift.call(arguments, this._wrapped)
+ result(method.apply(_, arguments), this._chain)
+
+
+ # Add all mutator Array functions to the wrapper.
+ _.each ['pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift'], (name) ->
+ method: Array.prototype[name]
+ wrapper.prototype[name]: ->
+ method.apply(this._wrapped, arguments)
+ result(this._wrapped, this._chain)
+
+
+ # Add all accessor Array functions to the wrapper.
+ _.each ['concat', 'join', 'slice'], (name) ->
+ method: Array.prototype[name]
+ wrapper.prototype[name]: ->
+ result(method.apply(this._wrapped, arguments), this._chain)
+
+
+ # Start chaining a wrapped Underscore object.
+ wrapper::chain: ->
+ this._chain: true
+ this
+
+
+ # Extracts the result from a wrapped and chained object.
+ wrapper::value: -> this._wrapped
diff --git a/tests/examplefiles/example.xml b/tests/examplefiles/xml_example
index e657e56..e657e56 100644
--- a/tests/examplefiles/example.xml
+++ b/tests/examplefiles/xml_example
diff --git a/tests/support.pyc b/tests/support.pyc
deleted file mode 100644
index 8aff198..0000000
--- a/tests/support.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_basic_api.py b/tests/test_basic_api.py
index 44a656b..b51d9b5 100644
--- a/tests/test_basic_api.py
+++ b/tests/test_basic_api.py
@@ -8,8 +8,8 @@
"""
import os
-import unittest
import random
+import unittest
from pygments import lexers, formatters, filters, format
from pygments.token import _TokenType, Text
@@ -25,63 +25,192 @@ test_content = [chr(i) for i in xrange(33, 128)] * 5
random.shuffle(test_content)
test_content = ''.join(test_content) + '\n'
-class LexersTest(unittest.TestCase):
-
- def test_import_all(self):
- # instantiate every lexer, to see if the token type defs are correct
- for x in lexers.LEXERS.keys():
- c = getattr(lexers, x)()
-
- def test_lexer_classes(self):
- a = self.assert_
- ae = self.assertEquals
- # test that every lexer class has the correct public API
- for lexer in lexers._iter_lexerclasses():
- a(type(lexer.name) is str)
- for attr in 'aliases', 'filenames', 'alias_filenames', 'mimetypes':
- a(hasattr(lexer, attr))
- a(type(getattr(lexer, attr)) is list, "%s: %s attribute wrong" %
- (lexer, attr))
- result = lexer.analyse_text("abc")
- a(isinstance(result, float) and 0.0 <= result <= 1.0)
-
- inst = lexer(opt1="val1", opt2="val2")
- if issubclass(lexer, RegexLexer):
- if not hasattr(lexer, '_tokens'):
- # if there's no "_tokens", the lexer has to be one with
- # multiple tokendef variants
- a(lexer.token_variants)
- for variant in lexer.tokens:
- a('root' in lexer.tokens[variant])
- else:
- a('root' in lexer._tokens, '%s has no root state' % lexer)
-
- tokens = list(inst.get_tokens(test_content))
- txt = ""
- for token in tokens:
- a(isinstance(token, tuple))
- a(isinstance(token[0], _TokenType))
- if isinstance(token[1], str):
- print repr(token[1])
- a(isinstance(token[1], unicode))
- txt += token[1]
- ae(txt, test_content, "%s lexer roundtrip failed: %r != %r" %
- (lexer.name, test_content, txt))
-
- def test_get_lexers(self):
- a = self.assert_
- ae = self.assertEquals
- # test that the lexers functions work
-
- for func, args in [(lexers.get_lexer_by_name, ("python",)),
- (lexers.get_lexer_for_filename, ("test.py",)),
- (lexers.get_lexer_for_mimetype, ("text/x-python",)),
- (lexers.guess_lexer, ("#!/usr/bin/python -O\nprint",)),
- (lexers.guess_lexer_for_filename, ("a.py", "<%= @foo %>"))
- ]:
- x = func(opt="val", *args)
- a(isinstance(x, lexers.PythonLexer))
- ae(x.options["opt"], "val")
+
+def test_lexer_import_all():
+ # instantiate every lexer, to see if the token type defs are correct
+ for x in lexers.LEXERS.keys():
+ c = getattr(lexers, x)()
+
+
+def test_lexer_classes():
+ # test that every lexer class has the correct public API
+ def verify(cls):
+ assert type(cls.name) is str
+ for attr in 'aliases', 'filenames', 'alias_filenames', 'mimetypes':
+ assert hasattr(cls, attr)
+ assert type(getattr(cls, attr)) is list, \
+ "%s: %s attribute wrong" % (cls, attr)
+ result = cls.analyse_text("abc")
+ assert isinstance(result, float) and 0.0 <= result <= 1.0
+
+ inst = cls(opt1="val1", opt2="val2")
+ if issubclass(cls, RegexLexer):
+ if not hasattr(cls, '_tokens'):
+ # if there's no "_tokens", the lexer has to be one with
+ # multiple tokendef variants
+ assert cls.token_variants
+ for variant in cls.tokens:
+ assert 'root' in cls.tokens[variant]
+ else:
+ assert 'root' in cls._tokens, \
+ '%s has no root state' % cls
+
+ tokens = list(inst.get_tokens(test_content))
+ txt = ""
+ for token in tokens:
+ assert isinstance(token, tuple)
+ assert isinstance(token[0], _TokenType)
+ if isinstance(token[1], str):
+ print repr(token[1])
+ assert isinstance(token[1], unicode)
+ txt += token[1]
+ assert txt == test_content, "%s lexer roundtrip failed: %r != %r" % \
+ (cls.name, test_content, txt)
+
+ for lexer in lexers._iter_lexerclasses():
+ yield verify, lexer
+
+
+def test_lexer_options():
+ # test that the basic options work
+ def ensure(tokens, output):
+ concatenated = ''.join(token[1] for token in tokens)
+ assert concatenated == output, \
+ '%s: %r != %r' % (lexer, concatenated, output)
+ def verify(cls):
+ inst = cls(stripnl=False)
+ ensure(inst.get_tokens('a\nb'), 'a\nb\n')
+ ensure(inst.get_tokens('\n\n\n'), '\n\n\n')
+ inst = cls(stripall=True)
+ ensure(inst.get_tokens(' \n b\n\n\n'), 'b\n')
+ # some lexers require full lines in input
+ if cls.__name__ not in (
+ 'PythonConsoleLexer', 'RConsoleLexer', 'RubyConsoleLexer',
+ 'SqliteConsoleLexer', 'MatlabSessionLexer', 'ErlangShellLexer',
+ 'BashSessionLexer', 'LiterateHaskellLexer'):
+ inst = cls(ensurenl=False)
+ ensure(inst.get_tokens('a\nb'), 'a\nb')
+ inst = cls(ensurenl=False, stripall=True)
+ ensure(inst.get_tokens('a\nb\n\n'), 'a\nb')
+
+ for lexer in lexers._iter_lexerclasses():
+ if lexer.__name__ == 'RawTokenLexer':
+ # this one is special
+ continue
+ yield verify, lexer
+
+
+def test_get_lexers():
+ # test that the lexers functions work
+ def verify(func, args):
+ x = func(opt='val', *args)
+ assert isinstance(x, lexers.PythonLexer)
+ assert x.options["opt"] == "val"
+
+ for func, args in [(lexers.get_lexer_by_name, ("python",)),
+ (lexers.get_lexer_for_filename, ("test.py",)),
+ (lexers.get_lexer_for_mimetype, ("text/x-python",)),
+ (lexers.guess_lexer, ("#!/usr/bin/python -O\nprint",)),
+ (lexers.guess_lexer_for_filename, ("a.py", "<%= @foo %>"))
+ ]:
+ yield verify, func, args
+
+
+def test_formatter_public_api():
+ ts = list(lexers.PythonLexer().get_tokens("def f(): pass"))
+ out = StringIO()
+ # test that every formatter class has the correct public API
+ def verify(formatter, info):
+ assert len(info) == 4
+ assert info[0], "missing formatter name"
+ assert info[1], "missing formatter aliases"
+ assert info[3], "missing formatter docstring"
+
+ if formatter.name == 'Raw tokens':
+ # will not work with Unicode output file
+ return
+
+ try:
+ inst = formatter(opt1="val1")
+ except (ImportError, FontNotFound):
+ return
+ try:
+ inst.get_style_defs()
+ except NotImplementedError:
+ # may be raised by formatters for which it doesn't make sense
+ pass
+ inst.format(ts, out)
+
+ for formatter, info in formatters.FORMATTERS.iteritems():
+ yield verify, formatter, info
+
+def test_formatter_encodings():
+ from pygments.formatters import HtmlFormatter
+
+ # unicode output
+ fmt = HtmlFormatter()
+ tokens = [(Text, u"ä")]
+ out = format(tokens, fmt)
+ assert type(out) is unicode
+ assert u"ä" in out
+
+ # encoding option
+ fmt = HtmlFormatter(encoding="latin1")
+ tokens = [(Text, u"ä")]
+ assert u"ä".encode("latin1") in format(tokens, fmt)
+
+ # encoding and outencoding option
+ fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
+ tokens = [(Text, u"ä")]
+ assert u"ä".encode("utf8") in format(tokens, fmt)
+
+
+def test_formatter_unicode_handling():
+ # test that the formatter supports encoding and Unicode
+ tokens = list(lexers.PythonLexer(encoding='utf-8').
+ get_tokens("def f(): 'ä'"))
+
+ def verify(formatter):
+ try:
+ inst = formatter(encoding=None)
+ except (ImportError, FontNotFound):
+ # some dependency or font not installed
+ return
+
+ if formatter.name != 'Raw tokens':
+ out = format(tokens, inst)
+ if formatter.unicodeoutput:
+ assert type(out) is unicode
+
+ inst = formatter(encoding='utf-8')
+ out = format(tokens, inst)
+ assert type(out) is bytes, '%s: %r' % (formatter, out)
+ # Cannot test for encoding, since formatters may have to escape
+ # non-ASCII characters.
+ else:
+ inst = formatter()
+ out = format(tokens, inst)
+ assert type(out) is bytes, '%s: %r' % (formatter, out)
+
+ for formatter, info in formatters.FORMATTERS.iteritems():
+ yield verify, formatter
+
+
+def test_get_formatters():
+ # test that the formatters functions work
+ x = formatters.get_formatter_by_name("html", opt="val")
+ assert isinstance(x, formatters.HtmlFormatter)
+ assert x.options["opt"] == "val"
+
+ x = formatters.get_formatter_for_filename("a.html", opt="val")
+ assert isinstance(x, formatters.HtmlFormatter)
+ assert x.options["opt"] == "val"
+
+
+def test_styles():
+ # minimal style test
+ from pygments.formatters import HtmlFormatter
+ fmt = HtmlFormatter(style="pastie")
class FiltersTest(unittest.TestCase):
@@ -136,95 +265,3 @@ class FiltersTest(unittest.TestCase):
text = u'# DEBUG: text'
tokens = list(lx.get_tokens(text))
self.assertEquals('# DEBUG: text', tokens[0][1])
-
-
-class FormattersTest(unittest.TestCase):
-
- def test_public_api(self):
- a = self.assert_
- ae = self.assertEquals
- ts = list(lexers.PythonLexer().get_tokens("def f(): pass"))
- out = StringIO()
- # test that every formatter class has the correct public API
- for formatter, info in formatters.FORMATTERS.iteritems():
- a(len(info) == 4)
- a(info[0], "missing formatter name") # name
- a(info[1], "missing formatter aliases") # aliases
- a(info[3], "missing formatter docstring") # doc
-
- if formatter.name == 'Raw tokens':
- # will not work with Unicode output file
- continue
-
- try:
- inst = formatter(opt1="val1")
- except (ImportError, FontNotFound):
- continue
- try:
- inst.get_style_defs()
- except NotImplementedError:
- # may be raised by formatters for which it doesn't make sense
- pass
- inst.format(ts, out)
-
- def test_encodings(self):
- from pygments.formatters import HtmlFormatter
-
- # unicode output
- fmt = HtmlFormatter()
- tokens = [(Text, u"ä")]
- out = format(tokens, fmt)
- self.assert_(type(out) is unicode)
- self.assert_(u"ä" in out)
-
- # encoding option
- fmt = HtmlFormatter(encoding="latin1")
- tokens = [(Text, u"ä")]
- self.assert_(u"ä".encode("latin1") in format(tokens, fmt))
-
- # encoding and outencoding option
- fmt = HtmlFormatter(encoding="latin1", outencoding="utf8")
- tokens = [(Text, u"ä")]
- self.assert_(u"ä".encode("utf8") in format(tokens, fmt))
-
- def test_styles(self):
- from pygments.formatters import HtmlFormatter
- fmt = HtmlFormatter(style="pastie")
-
- def test_unicode_handling(self):
- # test that the formatter supports encoding and Unicode
- tokens = list(lexers.PythonLexer(encoding='utf-8').
- get_tokens("def f(): 'ä'"))
- for formatter, info in formatters.FORMATTERS.iteritems():
- try:
- inst = formatter(encoding=None)
- except (ImportError, FontNotFound):
- # some dependency or font not installed
- continue
-
- if formatter.name != 'Raw tokens':
- out = format(tokens, inst)
- if formatter.unicodeoutput:
- self.assert_(type(out) is unicode)
-
- inst = formatter(encoding='utf-8')
- out = format(tokens, inst)
- self.assert_(type(out) is bytes, '%s: %r' % (formatter, out))
- # Cannot test for encoding, since formatters may have to escape
- # non-ASCII characters.
- else:
- inst = formatter()
- out = format(tokens, inst)
- self.assert_(type(out) is bytes, '%s: %r' % (formatter, out))
-
- def test_get_formatters(self):
- a = self.assert_
- ae = self.assertEquals
- # test that the formatters functions work
- x = formatters.get_formatter_by_name("html", opt="val")
- a(isinstance(x, formatters.HtmlFormatter))
- ae(x.options["opt"], "val")
-
- x = formatters.get_formatter_for_filename("a.html", opt="val")
- a(isinstance(x, formatters.HtmlFormatter))
- ae(x.options["opt"], "val")
diff --git a/tests/test_basic_api.pyc b/tests/test_basic_api.pyc
deleted file mode 100644
index 7369726..0000000
--- a/tests/test_basic_api.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_clexer.pyc b/tests/test_clexer.pyc
deleted file mode 100644
index 57e6655..0000000
--- a/tests/test_clexer.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_cmdline.pyc b/tests/test_cmdline.pyc
deleted file mode 100644
index e44c808..0000000
--- a/tests/test_cmdline.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_examplefiles.py b/tests/test_examplefiles.py
index d56eb7c..691ae92 100644
--- a/tests/test_examplefiles.py
+++ b/tests/test_examplefiles.py
@@ -8,9 +8,7 @@
"""
import os
-import unittest
-from pygments import highlight
from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
from pygments.token import Error
from pygments.util import ClassNotFound, b
diff --git a/tests/test_examplefiles.pyc b/tests/test_examplefiles.pyc
deleted file mode 100644
index 9e7f25c..0000000
--- a/tests/test_examplefiles.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_html_formatter.py b/tests/test_html_formatter.py
index 9ff593f..ae54b91 100644
--- a/tests/test_html_formatter.py
+++ b/tests/test_html_formatter.py
@@ -12,17 +12,19 @@ import re
import unittest
import StringIO
import tempfile
-from os.path import join, dirname, isfile, abspath
+from os.path import join, dirname, isfile
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter, NullFormatter
from pygments.formatters.html import escape_html
+from pygments.util import uni_open
import support
TESTFILE, TESTDIR = support.location(__file__)
-tokensource = list(PythonLexer(encoding='utf-8').get_tokens(open(TESTFILE).read()))
+tokensource = list(PythonLexer().get_tokens(
+ uni_open(TESTFILE, encoding='utf-8').read()))
class HtmlFormatterTest(unittest.TestCase):
diff --git a/tests/test_html_formatter.pyc b/tests/test_html_formatter.pyc
deleted file mode 100644
index 4776a45..0000000
--- a/tests/test_html_formatter.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_latex_formatter.pyc b/tests/test_latex_formatter.pyc
deleted file mode 100644
index de8f73d..0000000
--- a/tests/test_latex_formatter.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_regexlexer.pyc b/tests/test_regexlexer.pyc
deleted file mode 100644
index 2bcf10d..0000000
--- a/tests/test_regexlexer.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_token.pyc b/tests/test_token.pyc
deleted file mode 100644
index 30c8d69..0000000
--- a/tests/test_token.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_using_api.pyc b/tests/test_using_api.pyc
deleted file mode 100644
index d070c23..0000000
--- a/tests/test_using_api.pyc
+++ /dev/null
Binary files differ
diff --git a/tests/test_util.pyc b/tests/test_util.pyc
deleted file mode 100644
index ea024d1..0000000
--- a/tests/test_util.pyc
+++ /dev/null
Binary files differ